hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e2932f0cf6a4ecc325d1164487077f622c521d4 | 11,505 | py | Python | src/django-nonrel/tests/regressiontests/httpwrappers/tests.py | adamjmcgrath/glancydesign | 826ede7c639879d5b79ee730eb5e91422768cb02 | ["BSD-3-Clause"] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | tests/regressiontests/httpwrappers/tests.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | ["BSD-3-Clause"] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | tests/regressiontests/httpwrappers/tests.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | ["BSD-3-Clause"] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z |
import copy
import pickle
from django.http import (QueryDict, HttpResponse, SimpleCookie, BadHeaderError,
parse_cookie)
from django.utils import unittest
class QueryDictTests(unittest.TestCase):
def test_missing_key(self):
q = QueryDict('')
self.assertRaises(KeyError, q.__getitem__, 'foo')
def test_immutability(self):
q = QueryDict('')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
def test_immutable_get_with_default(self):
q = QueryDict('')
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict('')
self.assertEqual(q.getlist('foo'), [])
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(q.items(), [])
self.assertEqual(q.lists(), [])
self.assertEqual(q.items(), [])
self.assertEqual(q.keys(), [])
self.assertEqual(q.values(), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict('foo=bar')
self.assertEqual(q['foo'], 'bar')
self.assertRaises(KeyError, q.__getitem__, 'bar')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertFalse(q.has_key('bar'))
self.assertFalse('bar' in q)
self.assertEqual(q.items(), [(u'foo', u'bar')])
self.assertEqual(q.lists(), [(u'foo', [u'bar'])])
self.assertEqual(q.keys(), ['foo'])
self.assertEqual(q.values(), ['bar'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict('', mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict('', mutable=True)
q['next'] = u'/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict('').copy()
self.assertRaises(KeyError, q.__getitem__, "foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict('').copy()
q['name'] = 'john'
del q['name']
self.assertFalse('name' in q)
def test_basic_mutable_operations(self):
q = QueryDict('').copy()
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
self.assertTrue(q.has_key('foo'))
self.assertTrue('foo' in q)
self.assertEqual(q.items(), [(u'foo', u'another'), (u'name', u'john')])
self.assertEqual(q.lists(), [(u'foo', [u'bar', u'baz', u'another']), (u'name', [u'john'])])
self.assertEqual(q.keys(), [u'foo', u'name'])
self.assertEqual(q.values(), [u'another', u'john'])
self.assertEqual(len(q), 2)
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), [u'bar', u'baz', u'another', u'hello'])
self.assertEqual(q.pop('foo'), [u'bar', u'baz', u'another', u'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.urlencode(), 'foo=bar&name=john')
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict('vote=yes&vote=no')
self.assertEqual(q['vote'], u'no')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('vote', 'default'), u'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), [u'yes', u'no'])
self.assertEqual(q.getlist('foo'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertEqual(q.has_key('vote'), True)
self.assertEqual('vote' in q, True)
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(q.items(), [(u'vote', u'no')])
self.assertEqual(q.lists(), [(u'vote', [u'yes', u'no'])])
self.assertEqual(q.keys(), [u'vote'])
self.assertEqual(q.values(), [u'no'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertRaises(AttributeError, q.__delitem__, 'vote')
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding).
"""
q = QueryDict('foo=bar&foo=\xff')
self.assertEqual(q['foo'], u'\ufffd')
self.assertEqual(q.getlist('foo'), [u'bar', u'\ufffd'])
def test_pickle(self):
q = QueryDict('')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict('a=b&c=d')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict('a=b&c=d&a=1')
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1 , True)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict("a=1&a=2", mutable=True)
y = QueryDict("a=3&a=4")
x.update(y)
self.assertEqual(x.getlist('a'), [u'1', u'2', u'3', u'4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict('sbb=one', encoding='rot_13')
self.assertEqual(q.encoding , 'rot_13' )
self.assertEqual(q.items() , [(u'foo', u'bar')] )
self.assertEqual(q.urlencode() , 'sbb=one' )
q = q.copy()
self.assertEqual(q.encoding , 'rot_13' )
self.assertEqual(q.items() , [(u'foo', u'bar')] )
self.assertEqual(q.urlencode() , 'sbb=one' )
self.assertEqual(copy.copy(q).encoding , 'rot_13' )
self.assertEqual(copy.deepcopy(q).encoding , 'rot_13')
class HttpResponseTests(unittest.TestCase):
def test_unicode_headers(self):
r = HttpResponse()
# If we insert a unicode value it will be converted to an ASCII string.
r['value'] = u'test value'
self.assertTrue(isinstance(r['value'], str))
# An error is raised when a unicode object with non-ascii is assigned.
self.assertRaises(UnicodeEncodeError, r.__setitem__, 'value', u't\xebst value')
# An error is raised when a unicode object with non-ASCII format is
# passed as initial mimetype or content_type.
self.assertRaises(UnicodeEncodeError, HttpResponse,
mimetype=u't\xebst value')
# HttpResponse headers must be convertible to ASCII.
self.assertRaises(UnicodeEncodeError, HttpResponse,
content_type=u't\xebst value')
# The response also converts unicode keys to strings.
r[u'test'] = 'testing key'
l = list(r.items())
l.sort()
self.assertEqual(l[1], ('test', 'testing key'))
# It will also raise errors for keys with non-ascii data.
self.assertRaises(UnicodeEncodeError, r.__setitem__, u't\xebst key', 'value')
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')
class CookieTests(unittest.TestCase):
def test_encode(self):
"""
Test that we don't output tricky characters in encoded value
"""
# Python 2.4 compatibility note: Python 2.4's cookie implementation
# always returns Set-Cookie headers terminating in semi-colons.
# That's not the bug this test is looking for, so ignore it.
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertTrue(";" not in c.output().rstrip(';')) # IE compat
self.assertTrue("," not in c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""
Test that we can still preserve semi-colons and commas
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_decode_2(self):
"""
Test that we haven't broken normal encoding
"""
c = SimpleCookie()
c['test'] = "\xf0"
c2 = SimpleCookie()
c2.load(c.output())
self.assertEqual(c['test'].value, c2['test'].value)
def test_nonstandard_keys(self):
"""
Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
"""
self.assertTrue('good_cookie' in parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
| 40.510563 | 99 | 0.597132 |
3c81713102c4fc8e77eff69b1c1ce15706a59405 | 2,315 | py | Python | test/functional/feature_config_args.py | YEPCOIN/YEPCOIN | 5df02cce9bb76caee82cf832e1c35e37de799e81 | ["MIT"] | null | null | null | test/functional/feature_config_args.py | YEPCOIN/YEPCOIN | 5df02cce9bb76caee82cf832e1c35e37de799e81 | ["MIT"] | 1 | 2020-10-16T20:08:05.000Z | 2020-10-17T03:56:34.000Z | test/functional/feature_config_args.py | YEPCOIN/YEPCOIN | 5df02cce9bb76caee82cf832e1c35e37de799e81 | ["MIT"] | 1 | 2020-04-30T07:52:02.000Z | 2020-04-30T07:52:02.000Z |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import YepTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(YepTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "yep.conf")
with open(conf_file, 'a', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
| 46.3 | 169 | 0.692441 |
d5fd94645d4e01afd7375d97695a60a4b70de3c0 | 8,476 | py | Python | patroni/__init__.py | pmarcinkiewicz1/patroni | 16d1ffdde775a12c43d9b9365a169ad9fb20bc95 | ["MIT"] | null | null | null | patroni/__init__.py | pmarcinkiewicz1/patroni | 16d1ffdde775a12c43d9b9365a169ad9fb20bc95 | ["MIT"] | 7 | 2021-04-09T01:17:52.000Z | 2021-04-09T01:18:02.000Z | patroni/__init__.py | rammatzkvosky/patroni | cf007d61a0220bc08535af3a5b3c12565a899b48 | ["MIT"] | null | null | null |
import logging
import os
import signal
import sys
import time
from patroni.version import __version__
logger = logging.getLogger(__name__)
PATRONI_ENV_PREFIX = 'PATRONI_'
class Patroni(object):
def __init__(self, conf):
from patroni.api import RestApiServer
from patroni.dcs import get_dcs
from patroni.ha import Ha
from patroni.log import PatroniLogger
from patroni.postgresql import Postgresql
from patroni.request import PatroniRequest
from patroni.watchdog import Watchdog
self.setup_signal_handlers()
self.version = __version__
self.logger = PatroniLogger()
self.config = conf
self.logger.reload_config(self.config.get('log', {}))
self.dcs = get_dcs(self.config)
self.watchdog = Watchdog(self.config)
self.load_dynamic_configuration()
self.postgresql = Postgresql(self.config['postgresql'])
self.api = RestApiServer(self, self.config['restapi'])
self.request = PatroniRequest(self.config, True)
self.ha = Ha(self)
self.tags = self.get_tags()
self.next_run = time.time()
self.scheduled_restart = {}
def load_dynamic_configuration(self):
from patroni.exceptions import DCSError
while True:
try:
cluster = self.dcs.get_cluster()
if cluster and cluster.config and cluster.config.data:
if self.config.set_dynamic_configuration(cluster.config):
self.dcs.reload_config(self.config)
self.watchdog.reload_config(self.config)
elif not self.config.dynamic_configuration and 'bootstrap' in self.config:
if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']):
self.dcs.reload_config(self.config)
break
except DCSError:
logger.warning('Can not get cluster from dcs')
time.sleep(5)
def get_tags(self):
return {tag: value for tag, value in self.config.get('tags', {}).items()
if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value}
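    # Editor's illustration (added for clarity; not part of the original source).
    # get_tags() keeps the boolean-style tags only when they are truthy, while
    # other tags pass through unchanged, e.g. (tag values are hypothetical):
    #   {'nofailover': False, 'noloadbalance': True, 'replicatefrom': 'node1'}
    #   -> {'noloadbalance': True, 'replicatefrom': 'node1'}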
@property
def nofailover(self):
return bool(self.tags.get('nofailover', False))
@property
def nosync(self):
return bool(self.tags.get('nosync', False))
def reload_config(self, sighup=False):
try:
self.tags = self.get_tags()
self.logger.reload_config(self.config.get('log', {}))
self.watchdog.reload_config(self.config)
if sighup:
self.request.reload_config(self.config)
self.api.reload_config(self.config['restapi'])
self.postgresql.reload_config(self.config['postgresql'], sighup)
self.dcs.reload_config(self.config)
except Exception:
logger.exception('Failed to reload config_file=%s', self.config.config_file)
@property
def replicatefrom(self):
return self.tags.get('replicatefrom')
def sighup_handler(self, *args):
self._received_sighup = True
def sigterm_handler(self, *args):
with self._sigterm_lock:
if not self._received_sigterm:
self._received_sigterm = True
sys.exit()
@property
def noloadbalance(self):
return bool(self.tags.get('noloadbalance', False))
def schedule_next_run(self):
self.next_run += self.dcs.loop_wait
current_time = time.time()
nap_time = self.next_run - current_time
if nap_time <= 0:
self.next_run = current_time
# Release the GIL so we don't starve anyone waiting on async_executor lock
time.sleep(0.001)
# Warn user that Patroni is not keeping up
logger.warning("Loop time exceeded, rescheduling immediately.")
elif self.ha.watch(nap_time):
self.next_run = time.time()
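    # Editor's note (added; not in the original source): with dcs.loop_wait = 10,
    # a cycle that finishes after 2 seconds leaves nap_time = 8 for ha.watch();
    # a cycle that takes 12 seconds gives nap_time = -2, so the branch above logs
    # "Loop time exceeded" and reschedules the next run immediately.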
@property
def received_sigterm(self):
with self._sigterm_lock:
return self._received_sigterm
def run(self):
self.api.start()
self.logger.start()
self.next_run = time.time()
while not self.received_sigterm:
if self._received_sighup:
self._received_sighup = False
if self.config.reload_local_configuration():
self.reload_config(True)
else:
self.postgresql.config.reload_config(self.config['postgresql'], True)
logger.info(self.ha.run_cycle())
if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \
and self.config.set_dynamic_configuration(self.dcs.cluster.config):
self.reload_config()
if self.postgresql.role != 'uninitialized':
self.config.save_cache()
self.schedule_next_run()
def setup_signal_handlers(self):
from threading import Lock
self._received_sighup = False
self._sigterm_lock = Lock()
self._received_sigterm = False
if os.name != 'nt':
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
def shutdown(self):
with self._sigterm_lock:
self._received_sigterm = True
try:
self.api.shutdown()
except Exception:
logger.exception('Exception during RestApi.shutdown')
self.ha.shutdown()
self.logger.shutdown()
def patroni_main():
import argparse
from patroni.config import Config, ConfigParseError
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__))
parser.add_argument('configfile', nargs='?', default='',
help='Patroni may also read the configuration from the {0} environment variable'
.format(Config.PATRONI_CONFIG_VARIABLE))
args = parser.parse_args()
try:
conf = Config(args.configfile)
except ConfigParseError as e:
if e.value:
print(e.value)
parser.print_help()
sys.exit(1)
patroni = Patroni(conf)
try:
patroni.run()
except KeyboardInterrupt:
pass
finally:
patroni.shutdown()
def fatal(string, *args):
sys.stderr.write('FATAL: ' + string.format(*args) + '\n')
sys.exit(1)
def check_psycopg2():
min_psycopg2 = (2, 5, 4)
min_psycopg2_str = '.'.join(map(str, min_psycopg2))
def parse_version(version):
for e in version.split('.'):
try:
yield int(e)
except ValueError:
break
try:
import psycopg2
version_str = psycopg2.__version__.split(' ')[0]
version = tuple(parse_version(version_str))
if version < min_psycopg2:
fatal('Patroni requires psycopg2>={0}, but only {1} is available', min_psycopg2_str, version_str)
except ImportError:
fatal('Patroni requires psycopg2>={0} or psycopg2-binary', min_psycopg2_str)
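# Editor's illustration (added; not part of the original module). check_psycopg2()
# compares only the leading numeric components of psycopg2.__version__, so a
# version string such as '2.8.6 (dt dec pq3 ext lo64)' is treated as (2, 8, 6).
# The helper below duplicates the nested parse_version logic for demonstration;
# the version string is a hypothetical example.
def _example_parse_psycopg2_version():
    def parse_version(version):
        for e in version.split('.'):
            try:
                yield int(e)
            except ValueError:
                break
    version_str = '2.8.6 (dt dec pq3 ext lo64)'.split(' ')[0]
    assert version_str == '2.8.6'
    assert tuple(parse_version(version_str)) == (2, 8, 6)
    assert tuple(parse_version(version_str)) >= (2, 5, 4)  # meets min_psycopg2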
def main():
if os.getpid() != 1:
check_psycopg2()
return patroni_main()
# Patroni started with PID=1; it looks like we are running in a container
pid = 0
# Looks like we are in a Docker container, so we will act as init
def sigchld_handler(signo, stack_frame):
try:
while True:
ret = os.waitpid(-1, os.WNOHANG)
if ret == (0, 0):
break
elif ret[0] != pid:
logger.info('Reaped pid=%s, exit status=%s', *ret)
except OSError:
pass
def passtochild(signo, stack_frame):
if pid:
os.kill(pid, signo)
if os.name != 'nt':
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGHUP, passtochild)
signal.signal(signal.SIGQUIT, passtochild)
signal.signal(signal.SIGUSR1, passtochild)
signal.signal(signal.SIGUSR2, passtochild)
signal.signal(signal.SIGINT, passtochild)
signal.signal(signal.SIGABRT, passtochild)
signal.signal(signal.SIGTERM, passtochild)
import multiprocessing
patroni = multiprocessing.Process(target=patroni_main)
patroni.start()
pid = patroni.pid
patroni.join()
| 32.980545 | 109 | 0.611609 |
feb22d45da9d4bf56ed1a994f302c799788dfbf3 | 58,313 | py | Python | jax/interpreters/xla.py | sunilkpai/jax | 5bbb449ae5849c508194c8eb5b10c101f1fa22ae | ["ECL-2.0", "Apache-2.0"] | 1 | 2021-06-18T08:19:12.000Z | 2021-06-18T08:19:12.000Z | jax/interpreters/xla.py | yang-song/jax | a6b2c371d28f49a972814ffec03addb6773621b7 | ["ECL-2.0", "Apache-2.0"] | null | null | null | jax/interpreters/xla.py | yang-song/jax | a6b2c371d28f49a972814ffec03addb6773621b7 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, deque
import itertools as it
import operator as op
from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Type,
Tuple, Union, NamedTuple)
from warnings import warn
from absl import logging
import numpy as np
from ..config import flags, bool_env, config
from .. import core
from .. import ad_util
from .. import dtypes
from .. import lazy
from .. import linear_util as lu
from jax._src import source_info_util
from ..abstract_arrays import (make_shaped_array, array_types)
from ..core import (ConcreteArray, ShapedArray, AbstractToken,
Literal, pp_eqn_compact, raise_to_shaped, abstract_token)
from jax._src.pprint_util import pp
from .._src.util import (partial, partialmethod, cache, prod, unzip2,
extend_name_stack, wrap_name, safe_zip, safe_map)
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from . import partial_eval as pe
from . import ad
from . import masking
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
xe = xc._xla
xops = xc._xla.ops
# Types
Backend = Any # xc.LocalBackend (why does mypy not like this?)
Device = Any # xc.Device
PyLocalBuffer = Any
XlaOp = Any # xla_extension.XlaOp
XlaShape = Any # xla_client.Shape
XlaComputationBuilder = Any # xla_bridge._JaxComputationBuilder
XlaExecutable = Any # xla_extension.LocalExecutable
FLAGS = flags.FLAGS
flags.DEFINE_bool('jax_debug_nans',
bool_env('JAX_DEBUG_NANS', False),
'Add nan checks to every operation.')
flags.DEFINE_bool('jax_debug_infs',
bool_env('JAX_DEBUG_INFS', False),
'Add inf checks to every operation.')
flags.DEFINE_bool('jax_log_compiles',
bool_env('JAX_LOG_COMPILES', False),
'Print a message each time a `jit` computation is compiled.')
# This flag is set on exit; no logging should be attempted
_on_exit = False
def identity(x): return x
_scalar_types = dtypes.python_scalar_dtypes.keys()
# unit representation
def _make_unit_constant(c): return xb.constant(c, np.zeros((), dtype=np.dtype('bool')))
def _make_unit_shape(_): return (xc.Shape.array_shape(np.dtype('bool'), ()),)
def _device_put_unit(_, device):
backend = xb.get_device_backend(device)
return (backend.buffer_from_pyval(np.zeros((), dtype=np.dtype('bool')),
device),)
def _make_array_shape(a):
if a.dtype is dtypes.float0:
return (xc.Shape.array_shape(np.dtype('bool'), a.shape),)
else:
return (xc.Shape.array_shape(a.dtype, a.shape),)
### handlers
xb.register_constant_handler(core.Unit, lambda c, *_: _make_unit_constant(c))
def aval_to_xla_shapes(aval):
try:
return xla_shape_handlers[type(aval)](aval)
except KeyError as err:
raise TypeError(f"No xla_shape_handler for type: {type(aval)}") from err
xla_shape_handlers: Dict[Type[core.AbstractValue], Callable] = {
core.AbstractUnit: _make_unit_shape,
ShapedArray: _make_array_shape,
ConcreteArray: _make_array_shape,
}
def aval_to_result_handler(device: Optional[Device], aval: core.AbstractValue) -> Callable:
try:
return xla_result_handlers[type(aval)](device, aval)
except KeyError as err:
raise TypeError(f"No xla_result_handler for type: {type(aval)}") from err
def array_result_handler(device: Optional[Device], aval: core.ShapedArray):
if aval.dtype is dtypes.float0:
return lambda _: np.zeros(aval.shape, dtypes.float0)
return partial(make_device_array, raise_to_shaped(aval), device,
lazy.array(aval.shape))
xla_result_handlers: Dict[Type[core.AbstractValue], Callable[..., Callable]] = {
core.AbstractUnit: lambda _, __: lambda _: core.unit,
ShapedArray: array_result_handler,
ConcreteArray: array_result_handler,
}
def device_put(x, device: Optional[Device] = None) -> Tuple[Any]:
x = canonicalize_dtype(x)
try:
return device_put_handlers[type(x)](x, device)
except KeyError as err:
raise TypeError(f"No device_put handler for type: {type(x)}") from err
def _device_put_array(x, device: Optional[Device]):
backend = xb.get_device_backend(device)
if x.dtype is dtypes.float0:
x = np.zeros(x.shape, dtype=np.dtype(bool))
return (backend.buffer_from_pyval(x, device),)
def _device_put_scalar(x, device):
return _device_put_array(dtypes.coerce_to_array(x), device)
device_put_handlers: Dict[Any, Callable[[Any, Optional[Device]], Tuple[Any]]] = {
core.Unit: _device_put_unit
}
device_put_handlers.update((t, _device_put_array) for t in array_types)
device_put_handlers.update((t, _device_put_scalar) for t in _scalar_types)
# TODO(mattjj): try to remove this canonicalize_dtype stuff
def canonicalize_dtype(x):
typ = type(x)
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
for typ in typ.mro():
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
if hasattr(x, '__jax_array__'):
return canonicalize_dtype(x.__jax_array__())
raise TypeError(f"No canonicalize_dtype handler for type: {type(x)}")
def _canonicalize_ndarray_dtype(x):
return np.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))
def _canonicalize_python_scalar_dtype(typ, x):
return np.asarray(
x, dtypes.canonicalize_dtype(dtypes.python_scalar_dtypes[typ]))
canonicalize_dtype_handlers: Dict[Any, Callable] = {core.Unit: identity}
canonicalize_dtype_handlers.update(
(t, _canonicalize_ndarray_dtype) for t in array_types)
canonicalize_dtype_handlers.update(
(t, partial(_canonicalize_python_scalar_dtype, t)) for t in _scalar_types)
def abstractify(x) -> core.AbstractValue:
typ = type(x)
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
for typ in typ.mro():
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
if hasattr(x, '__jax_array__'):
return abstractify(x.__jax_array__())
raise TypeError(f"Argument '{x}' of type '{type(x)}' is not a valid JAX type")
def _make_abstract_python_scalar(typ, _):
return ShapedArray((), dtypes.python_scalar_dtypes[typ], weak_type=True)
pytype_aval_mappings: Dict[Any, Callable[[Any], core.AbstractValue]] = {
core.Unit: lambda _: core.abstract_unit,
}
pytype_aval_mappings.update((t, make_shaped_array) for t in array_types)
pytype_aval_mappings.update(
(t, partial(_make_abstract_python_scalar, t)) for t in _scalar_types)
# We can optionally set a Jaxpr rewriter that can be applied just before
# compilation. This mechanism is used for compiling id_tap; we can
# remove it once we bring the id_tap implementation into the core.
outfeed_rewriter: Optional[Callable[[core.Jaxpr], core.Jaxpr]] = None
def apply_outfeed_rewriter(jaxpr: core.Jaxpr) -> core.Jaxpr:
if outfeed_rewriter is not None:
return outfeed_rewriter(jaxpr)
else:
return jaxpr
outfeed_primitives: Set[core.Primitive] = set()
def jaxpr_uses_outfeed(jaxpr: core.Jaxpr) -> bool:
"""Finds if there are outfeed primitives anywhere inside a Jaxpr."""
return any(primitive_uses_outfeed(eqn.primitive, eqn.params)
for eqn in jaxpr.eqns)
def _param_uses_outfeed(param):
if type(param) is core.Jaxpr:
if jaxpr_uses_outfeed(param):
return True
elif type(param) is core.ClosedJaxpr:
if jaxpr_uses_outfeed(param.jaxpr):
return True
return False
def primitive_uses_outfeed(prim: core.Primitive, params: Dict) -> bool:
if prim in outfeed_primitives:
return True
for param in params.values():
if isinstance(param, tuple):
if any(unsafe_map(_param_uses_outfeed, param)):
return True
elif _param_uses_outfeed(param):
return True
return False
### op-by-op execution
def arg_spec(x):
aval = abstractify(x)
try:
return aval, x._device
except:
return aval, None
def apply_primitive(prim, *args, **params):
"""Impl rule that compiles and runs a single primitive 'prim' using XLA."""
compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args), **params)
return compiled_fun(*args)
def _partition_outputs(avals, outs):
nouts = [aval._num_buffers for aval in avals]
if not core.skip_checks:
assert sum(nouts) == len(outs), f"Internal error: sum(nouts)={sum(nouts)} should equal len(outs)={len(outs)}."
outs = iter(outs)
return [[next(outs) for _ in range(nout)] for nout in nouts]
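# Editor's illustration (added; not in the original module): _partition_outputs
# regroups a flat list of output buffers per abstract value, consuming
# aval._num_buffers entries for each. The fake aval below is hypothetical and
# only carries the single attribute the function reads.
def _example_partition_outputs():
  class _FakeAval:
    def __init__(self, num_buffers):
      self._num_buffers = num_buffers
  avals = [_FakeAval(1), _FakeAval(2)]
  assert _partition_outputs(avals, ['b0', 'b1', 'b2']) == [['b0'], ['b1', 'b2']]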
@cache()
def xla_primitive_callable(prim, *arg_specs: Tuple[core.AbstractValue,
Optional[Device]], **params):
avals, arg_devices = unzip2(arg_specs)
donated_invars = (False,) * len(arg_specs)
device = _device_from_arg_devices(arg_devices)
backend = xb.get_device_backend(device)
if primitive_uses_outfeed(prim, params):
# We use the _xla_callable path, where we pre-process the primitives
def prim_fun(*args):
return prim.bind(*args, **params)
return _xla_callable(lu.wrap_init(prim_fun), device, None, "prim", donated_invars,
*arg_specs)
aval_out = prim.abstract_eval(*avals, **params)
if not prim.multiple_results:
handle_result = aval_to_result_handler(device, aval_out)
else:
handlers = map(partial(aval_to_result_handler, device), aval_out)
handle_result = lambda *bufs:\
tuple(handler(*bs) for handler, bs in zip(handlers, _partition_outputs(aval_out, bufs)))
tuple_args = len(avals) > 100
if prim in initial_style_translations:
nreps = initial_style_primitive_replicas(params)
else:
nreps = 1
if nreps > xb.device_count(backend):
raise ValueError(
f"compiling a primitive computation `{prim}` that requires {nreps} "
f"replicas, but only {xb.device_count(backend)} XLA devices are "
f"available on backend {backend.platform}.")
built_c = primitive_computation(prim, AxisEnv(nreps, (), ()), backend,
tuple_args, *avals, **params)
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=device and (device.id,))
options.parameter_is_tupled_arguments = tuple_args
compiled = backend_compile(backend, built_c, options)
if nreps == 1:
return partial(_execute_compiled_primitive, prim, compiled, handle_result)
else:
return partial(_execute_replicated_primitive, prim, compiled, handle_result)
def _device_from_arg_devices(devices: Sequence[Optional[Device]]) -> Optional[Device]:
"""Given devices of inputs, determine where to perform a computation.
Args:
devices: list where each element is a either a `Device` instance or `None`.
Returns:
A `Device` instance or None.
Raises:
ValueError if input devices are inconsistent.
"""
try:
device, = {d for d in devices if d is not None} or (None,)
return device
except ValueError as err:
msg = "primitive arguments must be colocated on the same device, got {}"
raise ValueError(msg.format(", ".join(map(str, devices)))) from err
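# Editor's sketch (added; not in the original module): _device_from_arg_devices
# returns None when no argument is committed to a device, returns the single
# committed device otherwise, and raises if the committed devices disagree.
def _example_device_from_arg_devices():
  assert _device_from_arg_devices([]) is None
  assert _device_from_arg_devices([None, None]) is None
  # With a concrete Device d (not constructed here), [None, d] resolves to d,
  # and [d1, d2] for two distinct committed devices raises ValueError.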
@cache()
def primitive_computation(prim, axis_env, backend, tuple_args, *avals, **params):
c = xb.make_computation_builder(f"primitive_computation_{prim.name}")
c.set_op_metadata(xc.OpMetadata(
op_type=prim.name,
op_name=str(pp_eqn_compact(prim.name, params))))
platform = xb.get_backend(backend).platform
xla_args, _ = _xla_callable_args(c, avals, tuple_args)
# return val always set as a side-effect on c
if prim in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][prim]
ans = rule(c, *xla_args, **params)
elif prim in translations:
rule = translations[prim]
ans = rule(c, *xla_args, **params)
elif prim in translations_with_avals:
rule = translations_with_avals[prim]
ans = rule(c, avals, xla_args, params)
elif prim in initial_style_translations:
rule = initial_style_translations[prim]
ans = rule(c, axis_env, extend_name_stack(prim.name), avals, backend,
*xla_args, **params)
else:
raise NotImplementedError(f"XLA translation rule for {prim} not found")
assert isinstance(ans, xe.XlaOp)
c.clear_op_metadata()
try:
return c.build(ans)
except RuntimeError as e:
msg = (" ".join(map(str, e.args)) + "\n"
"This is a bug in JAX's shape-checking rules; please report it!\n"
"https://github.com/google/jax/issues\n")
raise RuntimeError(msg) from e
def primitive_subcomputation(prim, *avals, **params):
axis_env = AxisEnv(1, (), ())
return primitive_computation(prim, axis_env, None, False, *avals, **params)
def backend_compile(backend, built_c, options):
# we use a separate function call to ensure that XLA compilation appears
# separately in Python profiling results
return backend.compile(built_c, compile_options=options)
def _execute_compiled_primitive(prim, compiled, result_handler, *args):
device, = compiled.local_devices()
input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
out_bufs = compiled.execute(input_bufs)
check_special(prim, out_bufs)
return result_handler(*out_bufs)
def _execute_replicated_primitive(prim, compiled, result_handler, *args):
input_bufs = [
list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
for device in compiled.local_devices()]
out_bufs = [
buf[0] for buf in compiled.execute_sharded_on_local_devices(
list(zip(*input_bufs)))
]
return result_handler(*out_bufs)
def check_special(prim, bufs):
if FLAGS.jax_debug_infs or FLAGS.jax_debug_nans:
for buf in bufs:
_check_special(prim.name, buf.xla_shape(), buf)
def _check_special(name, xla_shape, buf):
assert not xla_shape.is_tuple()
if dtypes.issubdtype(xla_shape.element_type(), np.inexact):
if FLAGS.jax_debug_nans and np.any(np.isnan(buf.to_py())):
raise FloatingPointError(f"invalid value (nan) encountered in {name}")
if FLAGS.jax_debug_infs and np.any(np.isinf(buf.to_py())):
raise FloatingPointError(f"invalid value (inf) encountered in {name}")
### compiling jaxprs
def prefetch(x):
if isinstance(x, DeviceArray):
x.copy_to_host_async()
return x
def jaxpr_literals(jaxpr):
"""Generates all the literals inside a jaxpr, including nested subjaxprs."""
for eqn in jaxpr.eqns:
for v in eqn.invars:
if type(v) is core.Literal:
yield v.val
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_literals(subjaxpr)
def _flatmap(func: Callable, vars: Sequence):
return list(it.chain.from_iterable(map(func, vars)))
def _partitionmap(func: Callable, vars: Sequence, nodes: Sequence):
return map(func, vars, _partition_outputs([v.aval for v in vars], nodes))
def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack, *args):
if backend not in ('cpu', 'gpu', 'tpu'):
platform = xb.get_backend(backend).platform # canonicalize
else:
platform = backend
def read(v):
if type(v) is Literal:
return [xb.constant(c, canonicalize_dtype(v.val))]
else:
return env[v]
def aval(v):
if type(v) is Literal:
return abstractify(v.val)
else:
return v.aval
def write(v, node):
assert node is not None
env[v] = node
env = {}
_partitionmap(write, [core.unitvar], [_make_unit_constant(c)])
_partitionmap(write, jaxpr.constvars, consts)
_partitionmap(write, jaxpr.invars, args)
for eqn in jaxpr.eqns:
frame = source_info_util.user_frame(eqn.source_info)
c.set_op_metadata(xc.OpMetadata(
op_type=eqn.primitive.name,
op_name=str(pp(name_stack) >> pp_eqn_compact(
eqn.primitive.name, eqn.params)),
source_file=frame.file_name if frame else None,
source_line=frame.line_num if frame else None))
in_nodes = _flatmap(read, eqn.invars)
# TODO(jakevdp): migrate `translations` table to `translations_with_avals`
if eqn.primitive in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][eqn.primitive]
ans = rule(c, *in_nodes, **eqn.params)
elif eqn.primitive in translations:
ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)
elif eqn.primitive in translations_with_avals:
rule = translations_with_avals[eqn.primitive]
ans = rule(c, map(aval, eqn.invars), in_nodes, eqn.params)
elif eqn.primitive in initial_style_translations:
new_params = check_backend_params(eqn.params, backend)
rule = initial_style_translations[eqn.primitive]
ans = rule(c, axis_env, extend_name_stack(name_stack, eqn.primitive.name),
map(aval, eqn.invars), backend, *in_nodes, **new_params)
elif eqn.primitive in parallel_translations:
rule = parallel_translations[eqn.primitive]
ans = rule(c, *in_nodes, axis_env=axis_env, platform=platform, **eqn.params)
elif eqn.primitive in call_translations:
new_params = check_backend_params(eqn.params, backend)
rule = call_translations[eqn.primitive]
ans = rule(c, axis_env, in_nodes,
name_stack, backend=backend, **new_params)
else:
raise NotImplementedError(
f"XLA translation rule for primitive '{eqn.primitive.name}' not found")
assert isinstance(ans, xe.XlaOp)
c.get_shape(ans) # force xla to do shape error checking
if eqn.primitive.multiple_results or any(v.aval._num_buffers > 1 for v in eqn.outvars):
out_nodes = xla_destructure(c, ans)
else:
out_nodes = [ans]
c.clear_op_metadata()
_partitionmap(write, eqn.outvars, out_nodes)
return _flatmap(read, jaxpr.outvars)
def xla_destructure(c, ans):
num_elements = len(c.get_shape(ans).tuple_shapes())
return [xops.GetTupleElement(ans, i) for i in range(num_elements)]
def check_backend_params(params, outer_backend):
# For nested calls, the outermost call sets the backend for all inner calls;
# it's an error if the inner call has a conflicting explicit backend spec.
inner_backend = params.get('backend', None)
if inner_backend and inner_backend != outer_backend:
raise ValueError(
f"Outer-jit backend specification {outer_backend} must match explicit "
f"inner-jit backend specification {inner_backend}.")
return {k: params[k] for k in params if k != 'backend'}
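# Editor's illustration (added; not in the original module): check_backend_params
# strips the 'backend' entry and only rejects an explicit inner backend that
# conflicts with the outer one. The parameter values below are hypothetical.
def _example_check_backend_params():
  assert check_backend_params({'backend': 'cpu', 'name': 'f'}, 'cpu') == {'name': 'f'}
  assert check_backend_params({'name': 'f'}, 'gpu') == {'name': 'f'}
  # check_backend_params({'backend': 'gpu', 'name': 'f'}, 'cpu') raises ValueError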
class AxisEnv(NamedTuple):
"""Represents a pmap mesh (only along the replica axes)."""
nreps: int
names: Tuple[Any, ...]
sizes: Tuple[int, ...]
def extend_axis_env(env: AxisEnv, name, size: int):
return AxisEnv(env.nreps, env.names + (name,), env.sizes + (size,))
def axis_read(axis_env, axis_name):
try:
return max(i for i, name in enumerate(axis_env.names) if name == axis_name)
except ValueError:
raise NameError("unbound axis name: {}".format(axis_name)) from None
def axis_groups(axis_env: AxisEnv, name):
if not isinstance(name, (list, tuple)):
name = (name,)
mesh_axes = tuple(unsafe_map(partial(axis_read, axis_env), name))
trailing_size, ragged = divmod(axis_env.nreps, prod(axis_env.sizes))
assert not ragged
mesh_spec = axis_env.sizes + (trailing_size,)
return _axis_groups(mesh_spec, mesh_axes)
def _axis_groups(mesh_spec, mesh_axes):
"""Computes replica group ids for a collective performed over a subset of the mesh.
Args:
mesh_spec: A sequence of integers representing the mesh shape.
mesh_axes: A sequence of integers between 0 and `len(mesh_spec)` (exclusive)
indicating over which axes the collective is performed.
Returns:
A tuple of replica groups (i.e. tuples containing replica ids).
"""
iota = np.arange(prod(mesh_spec)).reshape(mesh_spec)
groups = np.reshape(
np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),
(prod(np.take(mesh_spec, mesh_axes)), -1))
return tuple(unsafe_map(tuple, groups.T))
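# Editor's worked example (added; not in the original module): for 4 replicas
# arranged as a (2, 2) mesh, a collective over mesh axis 0 groups replica ids
# that differ only along that axis, while axis 1 groups the rows.
def _example_axis_groups():
  assert _axis_groups((2, 2), (0,)) == ((0, 2), (1, 3))
  assert _axis_groups((2, 2), (1,)) == ((0, 1), (2, 3))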
def jaxpr_replicas(jaxpr: core.Jaxpr) -> int:
"""The number of replicas needed for a jaxpr.
For an eqn, multiply the `axis_size` by the `jaxpr_replicas` of the
subjaxprs. For a list of eqns, take the maximum number of replicas.
"""
return max(unsafe_map(eqn_replicas, jaxpr.eqns), default=1)
# TODO(mattjj): this function assumes that only pmap has a parameter named
# axis_size, and that it corresponds to cross-replica mapping
def eqn_replicas(eqn):
call_jaxpr = eqn.params.get("call_jaxpr")
if call_jaxpr:
return eqn.params.get('axis_size', 1) * jaxpr_replicas(call_jaxpr)
elif eqn.primitive in initial_style_translations:
return initial_style_primitive_replicas(eqn.params)
else:
return 1
def initial_style_primitive_replicas(params):
return max(core.traverse_jaxpr_params(jaxpr_replicas, params), default=1)
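# Editor's note (added for clarity; not in the original source): e.g. an xla_pmap
# eqn with axis_size=4 whose call_jaxpr itself contains a pmap with axis_size=2
# contributes 4 * 2 = 8 replicas, and jaxpr_replicas takes the max over all eqns.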
# TODO(mattjj,skyewm): the functions here are utilities for checking if
# not-yet-supported features are used with multi-host programming
def jaxpr_has_pmap(jaxpr):
"""Whether there is an xla_pmap primitive anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if 'xla_pmap' in eqn.primitive.name:
return True
for subjaxpr in core.subjaxprs(jaxpr):
if jaxpr_has_pmap(subjaxpr):
return True
return False
def jaxpr_collectives(jaxpr):
"""Generates all the collective primitives anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if eqn.primitive in parallel_translations:
yield eqn.primitive
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_collectives(subjaxpr)
### xla_call underlying jit
def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name, donated_invars):
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
*unsafe_map(arg_spec, args))
try:
return compiled_fun(*args)
except FloatingPointError:
assert FLAGS.jax_debug_nans or FLAGS.jax_debug_infs # compiled_fun can only raise in this case
print("Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version.")
# We want to run the wrapped function again (after _xla_callable already ran
# it), but linear_util.WrappedFun instances are meant to be run only once.
# In addition to re-executing the Python code, which is usually undesirable
# but which FLAGS.jax_debug_nans is meant to opt into, we'll be re-executing
# any linear_util.py-style side effects, i.e. re-populating Stores created
# by any transformation_with_aux's applied to fun. Since this is
# intentional here, to avoid "Store occupied" errors we reset the stores to
# be empty.
for store in fun.stores: store and store.reset()
return fun.call_wrapped(*args) # probably won't return
def flatten_shape(s: XlaShape) -> Sequence[Tuple[Sequence[int], XlaShape]]:
"""Expands a given shape tree into a flat list of indices to arrays.
Given the following computation:
>>> c = xc.XlaBuilder("example")
>>> p0 = xb.parameter(c, 1, xc.shape_from_pyval(jnp.ones([1])))
>>> p1 = xb.parameter(c, 2, xc.shape_from_pyval(jnp.ones([2])))
>>> p2 = xb.parameter(c, 3, xc.shape_from_pyval(jnp.ones([3])))
>>> o = xops.Tuple(c, [p0, p1, p2])
We can query the arrays in the output tuple:
>>> flatten_shape(c.GetShape(o))
(((0,), f32[1]{0}),
((1,), f32[2]{0}),
((2,), f32[3]{0}))
Or the arrays in one of the parameters (which is itself an array):
>>> flatten_shape(c.GetShape(p0))
(((), f32[1]{0}),)
Args:
s: The input shape.
Returns:
An iterable of pairs of indices and shapes for each array within the shape
tree.
"""
def _flatten_shape(s, index):
if s.is_array():
yield index, s
else:
assert s.is_tuple()
for i, sub in enumerate(s.tuple_shapes()):
subindex = index + (i,)
if sub.is_tuple():
yield from _flatten_shape(sub, subindex)
else:
yield subindex, sub
return tuple(_flatten_shape(s, index=()))
def _xla_consts(c, consts):
unique_consts = {id(const): const for const in consts}
xla_consts = {
id_: xb.constant(c, const) for id_, const in unique_consts.items()}
return [xla_consts[id(const)] for const in consts]
@lu.cache
def _xla_callable(fun: lu.WrappedFun, device, backend, name, donated_invars, *arg_specs):
if device is not None and backend is not None:
raise ValueError("can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend))
abstract_args, arg_devices = unzip2(arg_specs)
if config.omnistaging_enabled:
jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, abstract_args)
if any(isinstance(c, core.Tracer) for c in consts):
raise core.UnexpectedTracerError("Encountered an unexpected tracer.")
else:
pvals: Sequence[pe.PartialVal] = [pe.PartialVal.unknown(aval) for aval in abstract_args]
jaxpr, pvals, consts = pe.trace_to_jaxpr( # type: ignore
fun, pvals, instantiate=False, stage_out=True, bottom=True) # type: ignore
map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
jaxpr = apply_outfeed_rewriter(jaxpr)
nreps = jaxpr_replicas(jaxpr)
device = _xla_callable_device(nreps, backend, device, arg_devices)
backend = device.platform if device else backend
if config.omnistaging_enabled:
result_handlers = map(partial(aval_to_result_handler, device), out_avals)
else:
out_avals = [pval.get_aval() for pval in pvals]
result_handlers = map(partial(_pval_to_result_handler, device), pvals) # type: ignore
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to force their (potentially lazy) arguments.
if not jaxpr.eqns:
return partial(_execute_trivial, jaxpr, device, consts, out_avals, result_handlers)
if not _on_exit:
log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
logging.log(log_priority, "Compiling %s for args %s.", fun.__name__, abstract_args)
if nreps > 1:
warn(f"The jitted function {fun.__name__} includes a pmap. Using "
"jit-of-pmap can lead to inefficient data movement, as the outer jit "
"does not preserve sharded data representations and instead collects "
"input and output arrays onto a single device. "
"Consider removing the outer jit unless you know what you're doing. "
"See https://github.com/google/jax/issues/2926.")
if nreps > xb.device_count(backend):
raise ValueError(
f"compiling computation that requires {nreps} replicas, but only "
f"{xb.device_count(backend)} XLA devices are available")
if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all).")
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _xla_consts(c, consts)
xla_args, donated_invars = _xla_callable_args(c, abstract_args, tuple_args, donated_invars=donated_invars)
out_nodes = jaxpr_subcomp(
c, jaxpr, backend, AxisEnv(nreps, (), ()), xla_consts,
extend_name_stack(wrap_name(name, 'jit')), *xla_args)
out_tuple = xops.Tuple(c, out_nodes)
backend = xb.get_backend(backend)
if backend.platform in ("gpu", "tpu"):
donated_invars = set_up_aliases(c, xla_args, out_tuple, donated_invars, tuple_args)
if any(donated_invars):
# TODO(tomhennigan): At call time we should mark these buffers as deleted.
unused_donations = [str(c.GetShape(a))
for a, d in zip(xla_args, donated_invars) if d]
warn("Some donated buffers were not usable: {}".format(", ".join(unused_donations)))
built = c.build(out_tuple)
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=(device.id,) if device else None)
options.parameter_is_tupled_arguments = tuple_args
compiled = backend_compile(backend, built, options)
if nreps == 1:
return partial(_execute_compiled, compiled, out_avals, result_handlers)
else:
return partial(_execute_replicated, compiled, out_avals, result_handlers)
def set_up_aliases(c, xla_args, out_tuple, donated_args, tuple_args):
"""Configures input/output "must" aliasing based on `donated_args`."""
# First for every input array add it to `donations` iff it is a member of
# `donated_args`.
donations = defaultdict(deque)
for arg_index, arg in enumerate(xla_args):
if donated_args[arg_index]:
for param_index, element in flatten_shape(c.GetShape(arg)):
key = (element.dimensions(), element.numpy_dtype())
if tuple_args:
param_number = 0
param_index = (arg_index,) + tuple(param_index)
donations[key].append((param_number, param_index, arg_index))
else:
param_number = arg_index
donations[key].append((param_number, param_index, arg_index))
# Consume donations for outputs.
out_donated_args = list(donated_args)
for output_index, element in flatten_shape(c.GetShape(out_tuple)):
key = (element.dimensions(), element.numpy_dtype())
if donations.get(key, ()):
param_number, param_index, arg_index = donations[key].popleft()
out_donated_args[arg_index] = False
c.setup_alias(output_index, param_number, param_index)
return tuple(out_donated_args)
def _xla_callable_device(nreps, backend, device, arg_devices):
if nreps > 1:
if device is not None or backend is not None:
raise ValueError(f"can't specify device or backend for jit-of-pmap, "
f"got device={device} and backend={backend}")
return None
else:
if device is None and backend is None:
return _device_from_arg_devices(arg_devices)
elif device is not None and backend is None:
return device
elif device is None and backend is not None:
return xb.get_backend(backend).get_default_device_assignment(1)[0]
else:
assert False # Unreachable given the error check in _xla_callable
# Used within _xla_callable_args and _xla_param to distinguish between None (no
# sharding annotation set) and replicated.
_replicated_param = object()
def _xla_callable_args(
c, avals, tuple_args, *,
replicated=None,
partitions=None,
partitions_proto: bool = False,
donated_invars=None):
assert partitions is None or len(partitions) == len(avals)
if not tuple_args:
if replicated is None:
replicated = [None] * len(avals)
if partitions is None:
parts: List[object] = [None] * len(avals)
elif partitions_proto:
parts = partitions
else:
parts = [_replicated_param if part is None else part
for part in partitions]
counts = it.count()
xla_args = [_xla_param(c, next(counts), xla_shape, r, p, partitions_proto)
if a is not abstract_token else xops.CreateToken(c)
for (a, r, p) in safe_zip(avals, replicated, parts)
for xla_shape in aval_to_xla_shapes(a)]
if donated_invars is not None:
donated_invars = [d
for (a, r, p, d) in safe_zip(avals, replicated, parts, donated_invars)
for xla_shape in aval_to_xla_shapes(a)]
return xla_args, donated_invars
else:
if replicated is not None:
replicated = [r for a, r in zip(avals, replicated)
if a is not abstract_token]
if partitions is None:
tuple_parts = None
elif partitions_proto:
tuple_parts = xb.tuple_sharding_proto(partitions)
else:
tuple_parts = tuple(partitions)
tuple_shape = xc.Shape.tuple_shape(
[shape for a in avals for shape in aval_to_xla_shapes(a) if a is not abstract_token])
tuple_param = _xla_param(c, 0, tuple_shape, replicated, tuple_parts, partitions_proto)
xla_inputs = iter(xla_destructure(c, tuple_param))
xla_args = [next(xla_inputs) if a is not abstract_token else
xops.CreateToken(c) for a in avals]
assert next(xla_inputs, None) is None
return xla_args, donated_invars
def _xla_param(builder, param_num, xla_shape, replicated, partitions, parts_proto):
make_param = partial(xb.parameter, builder, param_num, xla_shape,
replicated=replicated)
with_sharding = xb.with_sharding_proto if parts_proto else xb.with_sharding
if partitions is None:
return make_param()
elif partitions is _replicated_param:
return with_sharding(builder, None, make_param)
else:
return with_sharding(builder, partitions, make_param)
def _execute_compiled(compiled: XlaExecutable, avals, handlers, *args):
device, = compiled.local_devices()
input_bufs = list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
out_bufs = compiled.execute(input_bufs)
check_special(xla_call_p, out_bufs)
return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]
def _execute_replicated(compiled: XlaExecutable, avals, handlers, *args):
input_bufs = [
list(it.chain.from_iterable(device_put(x, device) for x in args if x is not token))
for device in compiled.local_devices()]
out_bufs = [
buf[0] for buf in compiled.execute_sharded_on_local_devices(
list(zip(*input_bufs)))
]
check_special(xla_call_p, out_bufs)
return [handler(*bs) for handler, bs in zip(handlers, _partition_outputs(avals, out_bufs))]
def _execute_trivial(jaxpr, device: Optional[Device], consts, avals, handlers, *args):
env = {core.unitvar: core.unit}
map(env.setdefault, jaxpr.invars, args)
map(env.setdefault, jaxpr.constvars, consts)
outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]
for v in jaxpr.outvars]
return [_copy_device_array_to_device(x, device) if type_is_device_array(x)
else h(*device_put(x, device)) for h, x in zip(handlers, outs)]
xla_call_p = core.CallPrimitive('xla_call')
xla_call = xla_call_p.bind
xla_call_p.def_impl(_xla_call_impl)
def _xla_call_partial_eval_update_params(params, in_unknowns):
call_jaxpr = params['call_jaxpr']
donated_invars = params['donated_invars']
if not in_unknowns and donated_invars:
# JaxprTrace.post_process_call creates a call with no input tracers
new_donated_invars = (False,) * len(call_jaxpr.invars)
else:
# JaxprTrace.process_call drops known input tracers
donated_invars = [d for d, uk in zip(donated_invars, in_unknowns) if uk]
new_donated_invars = ((False,) * (len(call_jaxpr.invars) - len(donated_invars))
+ tuple(donated_invars))
return dict(params, donated_invars=new_donated_invars)
pe.call_param_updaters[xla_call_p] = _xla_call_partial_eval_update_params
def _xla_call_jvp_update_params(params, nz_tangents):
donated_invars = params['donated_invars']
donated_tangents = [d for d, nz in zip(donated_invars, nz_tangents) if nz]
new_donated_invars = (*donated_invars, *donated_tangents)
return dict(params, donated_invars=new_donated_invars)
ad.call_param_updaters[xla_call_p] = _xla_call_jvp_update_params
def _xla_call_transpose_update_params(params, undef_primals, nonzero_cts):
donated_invars = params['donated_invars']
donated_primals = [d for d, u in zip(donated_invars, undef_primals) if not u]
donated_cotangents = [False for nz in nonzero_cts if nz]
return dict(params, donated_invars=(*donated_primals, *donated_cotangents))
ad.call_transpose_param_updaters[xla_call_p] = _xla_call_transpose_update_params
def _xla_call_translation_rule(c, axis_env,
in_nodes, name_stack, backend, name,
call_jaxpr, donated_invars, device=None):
del device, donated_invars # Ignored.
subc = xb.make_computation_builder(f"jit_{name}")
args = [xb.parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]
out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'jit')), *args)
subc = subc.build(xops.Tuple(subc, out_nodes))
return xops.Call(c, subc, list(in_nodes))
ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)
### translation tables
translations: Dict[core.Primitive, Callable] = {}
translations_with_avals: Dict[core.Primitive, Callable] = {}
parallel_translations: Dict[core.Primitive, Callable] = {}
initial_style_translations: Dict[core.Primitive, Callable] = {}
call_translations: Dict[core.Primitive, Callable] = {}
backend_specific_translations: Dict[str, Dict[core.Primitive, Callable]] = defaultdict(dict)
call_translations[xla_call_p] = _xla_call_translation_rule
def zeros_like_translation_rule(c, x):
shape = c.get_shape(x)
assert not shape.is_tuple()
zero = xb.constant(c, np.array(0, shape.element_type()))
return xops.Broadcast(zero, shape.dimensions())
translations[ad_util.zeros_like_p] = zeros_like_translation_rule
def add_jaxvals_translation_rule(c, x, y):
shape = c.get_shape(x)
assert not shape.is_tuple()
return xops.Add(x, y)
translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule
translations[ad_util.stop_gradient_p] = lambda c, x: x
@lu.transformation
def _tuple_output(*args, **kwargs):
ans = yield args, kwargs
yield (ans,)
def lower_fun(fun, multiple_results, parallel=False, with_avals=False):
  # TODO(jakevdp): migrate dependent code & always use with_avals=True.
def f(c, *xla_args, **params):
avals = [_array_aval_from_xla_shape(c.get_shape(x)) for x in xla_args]
return f_with_avals(c, avals, xla_args, params)
def f_with_avals(c, avals, xla_args, params):
if parallel:
axis_env = params.pop('axis_env')
del params['platform']
else:
axis_env = AxisEnv(1, (), ())
wrapped_fun = lu.wrap_init(fun, params)
if not multiple_results:
wrapped_fun = _tuple_output(wrapped_fun)
if config.omnistaging_enabled:
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, avals)
outs = jaxpr_subcomp(c, jaxpr, None, axis_env, _xla_consts(c, consts), '',
*xla_args)
else:
pvals = [pe.PartialVal.unknown(a) for a in avals]
jaxpr, _, consts = pe.trace_to_jaxpr(wrapped_fun, pvals, instantiate=True,
stage_out=True) # type: ignore
xla_consts = _xla_consts(c, consts)
outs = jaxpr_subcomp(c, jaxpr, None, axis_env, xla_consts, '', *xla_args)
if multiple_results or any(v.aval._num_buffers > 1 for v in jaxpr.outvars):
return xops.Tuple(c, outs)
else:
assert len(outs) == 1, outs
return outs[0]
return f_with_avals if with_avals else f
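# Illustrative usage sketch (hypothetical primitive, not part of this module):
# lower_fun derives an XLA translation rule from a JAX-level implementation by
# tracing it to a jaxpr and lowering it with jaxpr_subcomp, e.g.
#
#   double_p = core.Primitive('double')             # hypothetical primitive
#   double_p.def_impl(lambda x: 2 * x)
#   double_p.def_abstract_eval(lambda x: x)
#   translations[double_p] = lower_fun(lambda x: 2 * x, multiple_results=False)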
def _array_aval_from_xla_shape(xla_shape):
  # This function encodes the assumption that we can map from XLA array types
  # to JAX array types.
  # TODO(mattjj): remove the assumption that XLA array types map to JAX array types
assert not xla_shape.is_tuple()
return ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype())
def lower_fun_initial_style(fun):
def f(c, axis_env, name_stack, avals, backend, *xla_args, **params):
if config.omnistaging_enabled:
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(lu.wrap_init(fun, params), avals)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, _xla_consts(c, consts),
name_stack, *xla_args)
else:
pvals = [pe.PartialVal.unknown(a) for a in avals]
jaxpr, _, consts = pe.trace_to_jaxpr(
lu.wrap_init(fun, params), pvals, instantiate=True, stage_out=True) # type: ignore
xla_consts = _xla_consts(c, consts)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, name_stack,
*xla_args)
return xops.Tuple(c, outs)
return f
### device-persistent data
class Token(object): pass
token = Token()
pytype_aval_mappings[Token] = lambda _: abstract_token
core.pytype_aval_mappings[Token] = lambda _: abstract_token
xla_shape_handlers[AbstractToken] = lambda _: (xc.Shape.token_shape(),)
xla_result_handlers[AbstractToken] = lambda _, __: lambda _: token
canonicalize_dtype_handlers[Token] = identity
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
# The following is used for the type _CppDeviceArray or _DeviceArray.
DeviceArrayProtocol = Any
DeviceArray = xc.DeviceArrayBase
_CppDeviceArray: DeviceArrayProtocol = xc.Buffer
_EXPERIMENTAL_CPP_DEVICE_ARRAY = False
def make_device_array(
aval: core.ShapedArray,
device: Optional[Device],
lazy_expr: Optional[lazy.LazyExpr],
device_buffer: Union[PyLocalBuffer, "DeviceConstant"],
) -> Union[PyLocalBuffer, "_DeviceArray"]:
"""Returns a DeviceArray implementation based on arguments.
  This is to be used only within JAX. It returns either a Python `_DeviceArray`
  or its C++ equivalent implementation.
"""
if (_EXPERIMENTAL_CPP_DEVICE_ARRAY and lazy.is_trivial(lazy_expr) and
not isinstance(device_buffer, DeviceConstant)):
assert isinstance(device_buffer, _CppDeviceArray)
device_buffer._device = device # pylint: disable=protected-access
device_buffer.aval = aval
return device_buffer
return _DeviceArray(aval, device, lazy_expr, device_buffer)
def type_is_device_array(x):
"""Returns `True` if `x` is a non-sharded DeviceArray.
  Use this function instead of `type(x) is DeviceArray`.
"""
type_x = type(x)
return type_x is _DeviceArray or type_x is _CppDeviceArray
class _DeviceArray(DeviceArray): # type: ignore
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = [
"aval", "device_buffer", "_npy_value", "_device", "_lazy_expr"
]
__array_priority__ = 100
# DeviceArray has methods that are dynamically populated in lax_numpy.py,
# and this annotation is needed to make pytype happy.
_HAS_DYNAMIC_ATTRIBUTES = True
def __init__(self, aval: core.ShapedArray, device: Optional[Device],
lazy_expr: Optional[lazy.LazyExpr],
device_buffer: PyLocalBuffer):
"""Initializer.
Args:
aval: The abstract value associated to this array (shape+dtype+weak_type).
device: The optional sticky device. See
https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices
      lazy_expr: An optional `LazyExpr`. `None` is equivalent to a trivial
`LazyExpr`.
device_buffer: The underlying buffer owning the on-device data.
"""
DeviceArray.__init__(self)
self.aval = aval
self.device_buffer = device_buffer
self._device = device
self._lazy_expr = lazy_expr
self._npy_value = None
if not core.skip_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
assert (device is None) or device is device_buffer.device()
def _check_if_deleted(self):
if self.device_buffer is deleted_buffer:
raise RuntimeError("DeviceArray has been deleted.")
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
Returns the buffer object (`self`).
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready() # pytype: disable=attribute-error
return self
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
if is_device_constant(self):
self._npy_value = lazy.eval_lexpr(self._lazy_expr, None)
else:
self._npy_value = _force(self).device_buffer.to_py()
self._npy_value.flags.writeable = False
return self._npy_value
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None and not is_device_constant(self):
self.device_buffer.copy_to_host_async() # pytype: disable=attribute-error
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete() # pytype: disable=attribute-error
self.device_buffer = deleted_buffer
self._npy_value = None
@property
def __cuda_array_interface__(self):
return _force(self).device_buffer.__cuda_array_interface__
# Adding methods dynamically to both _DeviceArray and _CppDeviceArray
# pylint: disable=protected-access
for device_array in [_DeviceArray, _CppDeviceArray]:
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return np.asarray(self)
setattr(device_array, "copy", copy)
def __repr__(self):
line_width = np.get_printoptions()["linewidth"]
prefix = '{}('.format(self.__class__.__name__.lstrip('_'))
s = np.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
dtype_str = 'dtype={})'.format(self.dtype.name)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
setattr(device_array, "__repr__", __repr__)
def item(self):
if dtypes.issubdtype(self.dtype, np.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, np.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, np.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, np.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
setattr(device_array, "item", item)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
setattr(device_array, "__len__", __len__)
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return self._value.__iter__()
setattr(device_array, "__iter__", __iter__)
def __reversed__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array")
else:
return reversed(self._value)
setattr(device_array, "__reversed__", __reversed__)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
setattr(device_array, "__format__", __format__)
def __array__(self, dtype=None, context=None):
return np.asarray(self._value, dtype=dtype)
setattr(device_array, "__array__", __array__)
setattr(device_array, "__str__", partialmethod(_forward_to_value, str))
setattr(device_array, "__bool__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__nonzero__", partialmethod(_forward_to_value, bool))
setattr(device_array, "__float__", lambda self: self._value.__float__())
setattr(device_array, "__int__", lambda self: self._value.__int__())
setattr(device_array, "__complex__", lambda self: self._value.__complex__())
setattr(device_array, "__hex__", partialmethod(_forward_to_value, hex))
setattr(device_array, "__oct__", partialmethod(_forward_to_value, oct))
setattr(device_array, "__index__", partialmethod(_forward_to_value, op.index))
to_bytes = lambda self, order="C": self._value.tobytes(order)
setattr(device_array, "tobytes", to_bytes)
del to_bytes
setattr(device_array, "tolist", lambda self: self._value.tolist())
# pickle saves and loads just like an ndarray
setattr(device_array, "__reduce__",
partialmethod(_forward_to_value, op.methodcaller("__reduce__")))
# clobbered when jax.numpy is imported, but useful in tests
setattr(device_array, "__eq__", lambda self, other: self._value == other)
def __hash__(self):
raise TypeError("JAX DeviceArray, like numpy.ndarray, is not hashable.")
setattr(device_array, "__hash__", __hash__)
# The following methods are dynamically overridden in lax_numpy.py.
def raise_not_implemented():
raise NotImplementedError
setattr(device_array, "__getitem__", lambda self, i: raise_not_implemented())
# pylint: enable=protected-access
class DeletedBuffer(object): pass
deleted_buffer = DeletedBuffer()
class DeviceConstant(object):
__slots__ = ["_device"]
def __init__(self, device=None): self._device = device
def device(self): return self._device
def to_py(self): return None
def is_device_constant(x):
return type_is_device_array(x) and type(x.device_buffer) is DeviceConstant
for device_array in [_CppDeviceArray, _DeviceArray]:
core.literalable_types.add(device_array)
core.pytype_aval_mappings[device_array] = ConcreteArray
pytype_aval_mappings[device_array] = op.attrgetter('aval')
canonicalize_dtype_handlers[device_array] = identity
def _device_array_constant_handler(c, val, canonicalize_types=True):
if is_device_constant(val):
return lazy.stage_lexpr(c, val._lazy_expr, None)
else:
base_val = xb.constant(c, val.device_buffer.to_py())
return lazy.stage_lexpr(c, val._lazy_expr, base_val)
xb.register_constant_handler(_DeviceArray, _device_array_constant_handler)
xb.register_constant_handler(_CppDeviceArray, _device_array_constant_handler)
def _device_put_device_array(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[Device]):
x = _copy_device_array_to_device(x, device)
return (_force(x).device_buffer,)
device_put_handlers[_CppDeviceArray] = _device_put_device_array
device_put_handlers[_DeviceArray] = _device_put_device_array
def _copy_device_array_to_device(x: Union[DeviceArrayProtocol, _DeviceArray], device: Optional[xc.Device]) -> Union[DeviceArrayProtocol, _DeviceArray]:
if device is None:
# no copying to be done because there's no target specified
return x
elif is_device_constant(x):
# create a new DeviceArray with the same lazy expr, no copying
return make_device_array(x.aval, device, x._lazy_expr,
DeviceConstant(device))
elif xb.get_device_backend(device).platform == x.device_buffer.platform():
# source and target platforms are the same
if x.device_buffer.device() == device:
# no copying to be done because source equals target
if x._device == device:
return x
else:
        moved_buf = x.device_buffer  # no copy needed; only the stickiness changes
else:
# move the buffer with a device-to-device copy
moved_buf = x.device_buffer.copy_to_device(device)
else:
# buffers from different XLA backends are passed through the host.
backend = xb.get_device_backend(device)
moved_buf = backend.buffer_from_pyval(x.device_buffer.to_py(), device)
return _DeviceArray(x.aval, device, x._lazy_expr, moved_buf)
def _force(x: DeviceArrayProtocol) -> DeviceArrayProtocol:
if lazy.is_trivial(x._lazy_expr):
return x
else:
# force x on the device where it lives, but preserve stickiness on result
if x._device:
device = x._device
else:
device = x.device_buffer.device()
force_fun = _lazy_force_computation(x.aval, device, x._lazy_expr)
result = force_fun(x)
return make_device_array(x.aval, x._device, lazy.array(x.aval.shape), result)
@cache()
def _lazy_force_computation(aval: core.ShapedArray,
device: Device, lexpr: lazy.LazyExpr
) -> Callable[[_DeviceArray], PyLocalBuffer]:
c = xb.make_computation_builder("lazy_force")
if lazy.is_constant(lexpr):
param = None
else:
idxs = [(src, dst) for dst, src in enumerate(lexpr.dims) if src is not None]
param_shape = [None] * len(idxs)
for src, dst in idxs:
param_shape[src] = aval.shape[dst]
param = xb.parameter(c, 0, xc.Shape.array_shape(aval.dtype, param_shape))
xla_out = lazy.stage_lexpr(c, lexpr, param)
built_c = c.build(xla_out)
device = _device_from_arg_devices([device])
options = xb.get_compile_options(
num_replicas=1,
num_partitions=1,
device_assignment=device and (device.id,))
compiled = backend_compile(xb.get_device_backend(device), built_c, options)
force_fun: Callable[[_DeviceArray], PyLocalBuffer]
if lazy.is_constant(lexpr):
def force_fun(_):
return compiled.execute([])[0]
else:
def force_fun(x):
return compiled.execute([x.device_buffer])[0]
return force_fun
def _device_put_impl(x, device: Optional[Device] = None):
if type_is_device_array(x):
return _copy_device_array_to_device(x, device)
try:
a = abstractify(x)
except TypeError as err:
raise TypeError(
f"Argument '{x}' of type {type(x)} is not a valid JAX type") from err
return aval_to_result_handler(device, a)(*device_put(x, device))
device_put_p = core.Primitive('device_put')
device_put_p.def_impl(_device_put_impl)
device_put_p.def_abstract_eval(lambda x, device=None: x)
translations[device_put_p] = lambda c, x, device=None: x
ad.deflinear2(device_put_p, lambda cotangent, _, **kwargs: [cotangent])
masking.defvectorized(device_put_p)
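# Note (informational): in this era of the codebase the public `jax.device_put`
# API bottoms out here by binding device_put_p on each pytree leaf, roughly:
#
#   jax.device_put(x, device)  ~  tree_map(lambda y: device_put_p.bind(y, device=device), x)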
def _remat_translation_rule(c, axis_env, in_nodes,
name_stack, backend, name, call_jaxpr,
device=None, concrete=None):
"""Lower remat to a Conditional which always returns true. This:
1. Circumvents common subexpression elimination.
2. In common case of `jax.grad(jax.remat(f))`, ensures the remat blocks
occur after the primal blocks, because cotangent is an input to the
Conditional."""
del device, concrete # Unused.
# Fake condition which always selects True branch.
rng = xops.RngUniform(xb.constant(c, np.array(0, dtype=np.float32)),
xb.constant(c, np.array(1, dtype=np.float32)),
xc.Shape.array_shape(xc.PrimitiveType.F32, []))
pred = xops.Lt(rng, xb.constant(c, np.array(2, dtype=np.float32)))
true_op = xops.Tuple(c, in_nodes)
remat_subc = xb.make_computation_builder("remat_call_subcomputation")
input_op = xb.parameter(remat_subc, 0, c.get_shape(true_op), replicated=[])
args = [xops.GetTupleElement(input_op, i) for i in range(len(in_nodes))]
out_nodes = jaxpr_subcomp(remat_subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'remat')),
*args)
out_node_shapes = [remat_subc.get_shape(o) for o in out_nodes]
remat_subc = remat_subc.build(xops.Tuple(remat_subc, out_nodes))
false_op = true_op
dummy_subc = xb.make_computation_builder("remat_call_dummy_subcomputation")
xb.parameter(dummy_subc, 0, c.get_shape(false_op), replicated=[])
def zeros(xla_shape):
if xla_shape.is_array():
shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()
zero = xb.constant(dummy_subc, np.array(0, dtype=dtype))
return xops.Broadcast(zero, shape)
else:
# It is a token
return xops.CreateToken(dummy_subc)
out_nodes = [zeros(s) for s in out_node_shapes]
dummy_subc = dummy_subc.build(xops.Tuple(dummy_subc, out_nodes))
return xops.Conditional(pred, true_op, remat_subc, false_op, dummy_subc)
call_translations[pe.remat_call_p] = _remat_translation_rule # type: ignore
ad.primitive_transposes[core.named_call_p] = partial(ad.call_transpose,
core.named_call_p)
def _named_call_translation_rule(c, axis_env, in_nodes, name_stack, *,
name="core_call", backend, call_jaxpr):
subc = xb.make_computation_builder(name)
  args = [xb.parameter(subc, i, c.get_shape(n)) for i, n in enumerate(in_nodes)]
out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, name), *args)
  subc = subc.build(xops.Tuple(subc, out_nodes))
return xops.Call(c, subc, list(in_nodes))
call_translations[core.named_call_p] = _named_call_translation_rule
def _call_translation_rule(c, axis_env, in_nodes, name_stack, *, backend,
call_jaxpr):
return _named_call_translation_rule(
c, axis_env, in_nodes, name_stack, name="core_call",
backend=backend, call_jaxpr=call_jaxpr)
call_translations[core.call_p] = _call_translation_rule
@config.register_omnistaging_disabler
def omnistaging_disabler() -> None:
global _pval_to_result_handler
def _pval_to_result_handler(device, pval):
pv, const = pval
if pv is None:
const = _device_put_impl(const, device) if device else const
return lambda _: const
else:
return aval_to_result_handler(device, pv)
pe.staged_out_calls.add(xla_call_p) # type: ignore
| 39.268013
| 151
| 0.715484
|
e080c229040c8e1a35883b8be7b4ebaa878ade47
| 12,821
|
py
|
Python
|
codes/models/loss.py
|
zhouhuanxiang/mmsr
|
4d3f0d2cbfc4f259a2998655413330b4448c1056
|
[
"Apache-2.0"
] | null | null | null |
codes/models/loss.py
|
zhouhuanxiang/mmsr
|
4d3f0d2cbfc4f259a2998655413330b4448c1056
|
[
"Apache-2.0"
] | null | null | null |
codes/models/loss.py
|
zhouhuanxiang/mmsr
|
4d3f0d2cbfc4f259a2998655413330b4448c1056
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
class TensorAxis:
N = 0
H = 1
W = 2
C = 3
class CSFlow:
def __init__(self, sigma=float(0.1), b=float(1.0)):
self.b = b
self.sigma = sigma
def __calculate_CS(self, scaled_distances, axis_for_normalization=TensorAxis.C):
self.scaled_distances = scaled_distances
self.cs_weights_before_normalization = torch.exp((self.b - scaled_distances) / self.sigma)
# self.cs_weights_before_normalization = 1 / (1 + scaled_distances)
self.cs_NHWC = CSFlow.sum_normalize(self.cs_weights_before_normalization, axis_for_normalization)
# self.cs_NHWC = self.cs_weights_before_normalization
# def reversed_direction_CS(self):
# cs_flow_opposite = CSFlow(self.sigma, self.b)
# cs_flow_opposite.raw_distances = self.raw_distances
# work_axis = [TensorAxis.H, TensorAxis.W]
# relative_dist = cs_flow_opposite.calc_relative_distances(axis=work_axis)
# cs_flow_opposite.__calculate_CS(relative_dist, work_axis)
# return cs_flow_opposite
# --
@staticmethod
def create_using_L2(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
sT = T_features.shape
sI = I_features.shape
# Nx(HW)xC
Ivecs = torch.reshape(I_features, (sI[0], -1, sI[3]))
Tvecs = torch.reshape(T_features, (sI[0], -1, sT[3]))
# Nx(HW)
r_Ts = torch.sum(Tvecs * Tvecs, 2)
r_Is = torch.sum(Ivecs * Ivecs, 2)
raw_distances_list = []
for i in range(sT[0]):
# (HW)xC, (HW)XC, (HW), (HW)
Ivec, Tvec, r_T, r_I = Ivecs[i], Tvecs[i], r_Ts[i], r_Is[i]
# (HW)x(HW)
A = Tvec @ torch.transpose(Ivec, 0, 1) # (matrix multiplication)
cs_flow.A = A
# A = tf.matmul(Tvec, tf.transpose(Ivec))
# (HW)x1
r_T = torch.reshape(r_T, [-1, 1]) # turn to column vector
# (HW)x(HW)
dist = r_T - 2 * A + r_I
dist = torch.reshape(torch.transpose(dist, 0, 1), shape=(1, sI[1], sI[2], dist.shape[0]))
# protecting against numerical problems, dist should be positive
dist = torch.clamp(dist, min=float(0.0))
# dist = tf.sqrt(dist)
raw_distances_list += [dist]
cs_flow.raw_distances = torch.cat(raw_distances_list)
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
# --
@staticmethod
def create_using_L1(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
sT = T_features.shape
sI = I_features.shape
Ivecs = torch.reshape(I_features, (sI[0], -1, sI[3]))
Tvecs = torch.reshape(T_features, (sI[0], -1, sT[3]))
raw_distances_list = []
for i in range(sT[0]):
Ivec, Tvec = Ivecs[i], Tvecs[i]
            dist = torch.sum(torch.abs(Ivec.unsqueeze(1) - Tvec.unsqueeze(0)), dim=2)  # sum of absolute differences (L1)
dist = torch.reshape(torch.transpose(dist, 0, 1), shape=(1, sI[1], sI[2], dist.shape[0]))
# protecting against numerical problems, dist should be positive
dist = torch.clamp(dist, min=float(0.0))
# dist = tf.sqrt(dist)
raw_distances_list += [dist]
cs_flow.raw_distances = torch.cat(raw_distances_list)
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
# --
@staticmethod
def create_using_dotP(I_features, T_features, sigma=float(0.5), b=float(1.0)):
cs_flow = CSFlow(sigma, b)
        # prepare features before calculating the cosine distance
cs_flow.center_by_T(T_features, I_features)
CSFlow.l2_normalize_channelwise_inplace(cs_flow)
# T_features, I_features = cs_flow.center_by_T(T_features, I_features)
# T_features = CSFlow.l2_normalize_channelwise(T_features)
# I_features = CSFlow.l2_normalize_channelwise(I_features)
        # work separately for each example in dim 0 (the batch dimension)
cosine_dist_l = []
N = T_features.size()[0]
for i in range(N):
            T_features_i = cs_flow.T_features_centered[i, :, :, :].unsqueeze(0)  # HWC --> 1HWC
            I_features_i = cs_flow.I_features_centered[i, :, :, :].unsqueeze(0).permute((0, 3, 1, 2))  # HWC --> 1CHW for conv2d
patches_PC11_i = cs_flow.patch_decomposition(T_features_i) # 1HWC --> PC11, with P=H*W
cosine_dist_i = torch.nn.functional.conv2d(I_features_i, patches_PC11_i)
# cosine_dist_1HWC = cosine_dist_i.permute((0, 2, 3, 1))
cosine_dist_l.append(cosine_dist_i.permute((0, 2, 3, 1))) # back to 1HWC
# cs_flow.cs_NHWC = cosine_dist_l[0]
# return cs_flow
# cs_flow.cs_NHWC = cosine_dist_l[0]
# return cs_flow
cs_flow.cosine_dist = torch.cat(cosine_dist_l, dim=0)
        cs_flow.raw_distances = (1 - cs_flow.cosine_dist) / 2  # map cosine similarity in [-1, 1] to a distance in [0, 1]
relative_dist = cs_flow.calc_relative_distances()
cs_flow.__calculate_CS(relative_dist)
return cs_flow
def calc_relative_distances(self, axis=TensorAxis.C):
epsilon = 1e-5
div = torch.min(self.raw_distances, dim=axis, keepdim=True)[0]
relative_dist = self.raw_distances / (div + epsilon)
return relative_dist
@staticmethod
def sum_normalize(cs, axis=TensorAxis.C):
reduce_sum = torch.sum(cs, dim=axis, keepdim=True)
cs_normalize = torch.div(cs, reduce_sum)
return cs_normalize
def center_by_T(self, T_features, I_features):
        # assuming both inputs are of the same size
        # calculate stats over [batch, height, width], expecting a 1x1x1xC tensor
axes = [0, 1, 2]
self.meanT = T_features.mean(0, keepdim=True).mean(1, keepdim=True).mean(2, keepdim=True)
# self.varT = T_features.var(0, keepdim=True).var(1, keepdim=True).var(2, keepdim=True)
self.T_features_centered = torch.sub(T_features, self.meanT)
self.I_features_centered = torch.sub(I_features, self.meanT)
# return self.T_features_centered, self.I_features_centered
@staticmethod
def l2_normalize_channelwise(features):
norms = features.norm(p=2, dim=TensorAxis.C, keepdim=True)
features = features.div(norms)
return features
@staticmethod
def l2_normalize_channelwise_inplace(cs_flow):
norms = cs_flow.T_features_centered.norm(p=2, dim=TensorAxis.C, keepdim=True)
cs_flow.T_features_centered = cs_flow.T_features_centered.div(norms)
norms = cs_flow.I_features_centered.norm(p=2, dim=TensorAxis.C, keepdim=True)
cs_flow.I_features_centered = cs_flow.I_features_centered.div(norms)
def patch_decomposition(self, T_features):
# 1HWC --> 11PC --> PC11, with P=H*W
(N, H, W, C) = T_features.shape
P = H * W
patches_PC11 = T_features.reshape(shape=(1, 1, P, C)).permute(dims=(2, 3, 0, 1))
return patches_PC11
@staticmethod
def pdist2(x, keepdim=False):
sx = x.shape
x = x.reshape(shape=(sx[0], sx[1] * sx[2], sx[3]))
differences = x.unsqueeze(2) - x.unsqueeze(1)
distances = torch.sum(differences**2, -1)
if keepdim:
distances = distances.reshape(shape=(sx[0], sx[1], sx[2], sx[3]))
return distances
@staticmethod
def calcR_static(sT, order='C', deformation_sigma=0.05):
        # order can be 'C' (row-major) or 'F' (Fortran/MATLAB column-major)
pixel_count = sT[0] * sT[1]
rangeRows = range(0, sT[1])
rangeCols = range(0, sT[0])
Js, Is = np.meshgrid(rangeRows, rangeCols)
row_diff_from_first_row = Is
col_diff_from_first_col = Js
row_diff_from_first_row_3d_repeat = np.repeat(row_diff_from_first_row[:, :, np.newaxis], pixel_count, axis=2)
col_diff_from_first_col_3d_repeat = np.repeat(col_diff_from_first_col[:, :, np.newaxis], pixel_count, axis=2)
rowDiffs = -row_diff_from_first_row_3d_repeat + row_diff_from_first_row.flatten(order).reshape(1, 1, -1)
colDiffs = -col_diff_from_first_col_3d_repeat + col_diff_from_first_col.flatten(order).reshape(1, 1, -1)
R = rowDiffs ** 2 + colDiffs ** 2
R = R.astype(np.float32)
R = np.exp(-(R) / (2 * deformation_sigma ** 2))
return R
class ContextualLoss(nn.Module):
def __init__(self):
super(ContextualLoss, self).__init__()
def forward(self, I_features, T_features, deformation=False, dis=False):
def from_pt2tf(Tpt):
Ttf = Tpt.permute(0, 2, 3, 1)
return Ttf
# N x C x H x W --> N x H x W x C
T_features_tf = from_pt2tf(T_features)
I_features_tf = from_pt2tf(I_features)
cs_flow = CSFlow.create_using_dotP(I_features_tf, T_features_tf, sigma=1.0)
# return torch.mean(I_features - T_features)
# cs_flow = CSFlow.create_using_L2(I_features_tf, T_features_tf, sigma=1.0)
# sum_normalize:
# To:
cs = cs_flow.cs_NHWC
if deformation:
deforma_sigma = 0.001
            sT = T_features_tf.shape[1:3]  # (H, W)
R = CSFlow.calcR_static(sT, deformation_sigma=deforma_sigma)
cs *= torch.Tensor(R).unsqueeze(dim=0).cuda()
if dis:
CS = []
k_max_NC = torch.max(torch.max(cs, dim=1)[1], dim=1)[1]
indices = k_max_NC.cpu()
N, C = indices.shape
for i in range(N):
CS.append((C - len(torch.unique(indices[i, :]))) / C)
score = torch.FloatTensor(CS)
else:
# reduce_max X and Y dims
# cs = CSFlow.pdist2(cs,keepdim=True)
k_max_NC = torch.max(torch.max(cs, dim=1)[0], dim=1)[0]
# reduce mean over C dim
CS = torch.mean(k_max_NC, dim=1)
# score = 1/CS
# score = torch.exp(-CS*10)
score = -torch.log(CS)
# reduce mean over N dim
# CX_loss = torch.mean(CX_loss)
score = score.mean()
return score
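# Illustrative usage sketch (hypothetical shapes): ContextualLoss expects NCHW
# feature maps, e.g. VGG activations of a generated image and a target image.
#
#   cx_loss = ContextualLoss()
#   feat_fake = torch.randn(1, 64, 32, 32)   # N x C x H x W
#   feat_real = torch.randn(1, 64, 32, 32)
#   loss = cx_loss(feat_fake, feat_real)     # scalar tensor, -log of the CX score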
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-6):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss
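# Illustrative usage sketch: CharbonnierLoss is a smooth L1-style penalty between
# two tensors of the same shape.
#
#   charb = CharbonnierLoss(eps=1e-6)
#   loss = charb(torch.rand(1, 3, 16, 16), torch.rand(1, 3, 16, 16))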
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'gan' or self.gan_type == 'ragan':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
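# Illustrative usage sketch (hypothetical discriminator logits): the same
# criterion is applied to real and fake outputs with different target flags.
#
#   criterion = GANLoss('gan')
#   d_out_real = torch.randn(4, 1)
#   d_out_fake = torch.randn(4, 1)
#   d_loss = criterion(d_out_real, True) + criterion(d_out_fake, False)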
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp,
grad_outputs=grad_outputs, create_graph=True,
retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
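# Illustrative WGAN-GP usage sketch (`netD`, `real` and `fake` are hypothetical):
# the penalty is evaluated on random interpolations between real and fake samples.
#
#   gp = GradientPenaltyLoss(device=torch.device('cpu'))
#   eps = torch.rand(real.size(0), 1, 1, 1)
#   interp = (eps * real + (1.0 - eps) * fake).requires_grad_(True)
#   interp_crit = netD(interp)
#   loss_gp = gp(interp, interp_crit)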
| 39.693498
| 117
| 0.616879
|
4a0122409dae530dac8fe1b1ac5d2bb914a01cb8
| 3,500
|
py
|
Python
|
maipc/RNN/processing.py
|
nPironio/maipc
|
776e1d53063005d89ce463883c20ea5519a8b8d0
|
[
"BSD-3-Clause"
] | null | null | null |
maipc/RNN/processing.py
|
nPironio/maipc
|
776e1d53063005d89ce463883c20ea5519a8b8d0
|
[
"BSD-3-Clause"
] | null | null | null |
maipc/RNN/processing.py
|
nPironio/maipc
|
776e1d53063005d89ce463883c20ea5519a8b8d0
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Dict, List, Tuple
import madmom as md
import numpy as np
NN_DTYPE = np.float32
def get_blstms(RNN: str) -> List[md.ml.nn.layers.BidirectionalLayer]:
"""
Get the input and bidirectional LSTM layers of a trained model
Args:
RNN: path to madmom trained NN
Returns: list of layers
"""
rnn = md.ml.nn.NeuralNetwork.load(RNN)
return rnn.layers[:3]
def layers_values(RNN_input: List[float], blstms: List[md.ml.nn.layers.BidirectionalLayer],
ppty_name: str) -> List[Dict[str, List[np.ndarray]]]:
"""
Get internal value activations for an input
Args:
RNN_input: input for the RNN
blstms: list of the bidirectional layers of the network
ppty_name: the type of values to get
Returns: values organized by layer, direction (fwd/bwd) and frame
"""
layer_input = RNN_input
layer_values = []
for bi_layer in blstms:
layer_input, values = get_bidirectional_values(bi_layer, layer_input, ppty_name)
layer_values.append(values)
return layer_values
def get_bidirectional_values(bi_layer: md.ml.nn.layers.BidirectionalLayer,
                             layer_input: List[float], ppty_name: str
                             ) -> Tuple[np.ndarray, Dict[str, List[np.ndarray]]]:
"""
Get the activation values for the forward and backward layer of a bidirectional layer
Args:
bi_layer: bidirectional layer
layer_input: input to process by the layer
ppty_name: the type of values to get
    Returns: the stacked forward/backward layer output and a dictionary with the forward and backward activation values
"""
fwd, fwd_values = neurons_values(bi_layer.fwd_layer, layer_input, ppty_name)
# also activate with reverse input
    bwd, bwd_values = neurons_values(bi_layer.bwd_layer, layer_input[::-1], ppty_name)
# stack data
output = np.hstack((fwd, bwd[::-1]))
    return output, {'forward': fwd_values, 'backward': bwd_values}
def neurons_values(lstm_layer: md.ml.nn.layers.LSTMLayer, data: List[float],
                   ppty_name: str) -> Tuple[np.ndarray, List[np.ndarray]]:
"""
Get the activation values for a LSTM layer
Args:
lstm_layer: LSTM layer
data: data to process
ppty_name: the type of values to get
    Returns: the layer output for the whole sequence and a list with the requested internal value for each frame
"""
# init arrays
size = len(data)
# output matrix for the whole sequence
out = np.zeros((size, lstm_layer.cell.bias.size), dtype=NN_DTYPE)
# output list of internal values
ppty_values = {'cell_state': [], 'output': []}
# process the input data
for i in range(size):
# cache input data
data_ = data[i]
# input gate:
# operate on current data, previous output and state
ig = lstm_layer.input_gate.activate(data_, lstm_layer._prev, lstm_layer._state)
# forget gate:
# operate on current data, previous output and state
fg = lstm_layer.forget_gate.activate(data_, lstm_layer._prev, lstm_layer._state)
# cell:
# operate on current data and previous output
cell = lstm_layer.cell.activate(data_, lstm_layer._prev)
# internal state:
# weight the cell with the input gate
# and add the previous state weighted by the forget gate
lstm_layer._state = cell * ig + lstm_layer._state * fg
# output gate:
# operate on current data, previous output and current state
og = lstm_layer.output_gate.activate(data_, lstm_layer._prev, lstm_layer._state)
# output:
# apply activation function to state and weight by output gate
out[i] = lstm_layer.activation_fn(lstm_layer._state) * og
# set reference to current output
lstm_layer._prev = out[i]
# store internal values
ppty_values['cell_state'].append(cell)
ppty_values['output'].append(out[i])
return out, ppty_values[ppty_name]
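# Illustrative usage sketch (hypothetical model path and pre-framed input features):
#
#   blstms = get_blstms('/path/to/madmom_beats_blstm.pkl')       # hypothetical path
#   activations = layers_values(rnn_input, blstms, 'output')     # rnn_input: per-frame features
#   first_layer_fwd = activations[0]['forward']                  # list of per-frame arrays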
| 33.653846
| 113
| 0.742
|
7af5264245ac98ac815eb77ed77b99e4f5725333
| 3,072
|
py
|
Python
|
tests/common/test_run/apply_rms_prop_mixed_precision_run.py
|
laekov/akg
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
[
"Apache-2.0"
] | 1
|
2020-08-31T02:43:43.000Z
|
2020-08-31T02:43:43.000Z
|
tests/common/test_run/apply_rms_prop_mixed_precision_run.py
|
laekov/akg
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
[
"Apache-2.0"
] | null | null | null |
tests/common/test_run/apply_rms_prop_mixed_precision_run.py
|
laekov/akg
|
5316b8cb2340bbf71bdc724dc9d81513a67b3104
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""apply_rms_prop_mixed_precision_run"""
import numpy as np
from tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from test_op import apply_rms_prop
from base import get_rtol_atol
from gen_random import random_gaussian
def apply_rms_prop_mixed_precision_run(shape, dtype, lr, momentum, rho, epsilon, attrs=None):
"""run function for dsl function apply_rms_prop_mixed_precision."""
if attrs is None:
attrs = {}
dtype = dtype.lower()
shapes = [shape, shape, shape, shape, (1,), (1,), (1,)]
types = [dtype, dtype, dtype, dtype, dtype, dtype, dtype]
op_attrs = [epsilon]
mod = utils.op_build_test(apply_rms_prop.apply_rms_prop_mixed_precision, shapes, types,
op_attrs=op_attrs, kernel_name="apply_rms_prop_mixed_precision", attrs=attrs)
inputs, expects, args = gen_data(shape, dtype, lr, momentum, rho, epsilon)
outputs = utils.mod_launch(mod, args, outputs=(0, -1, 1, 2), expect=expects)
# output type: fp32, fp16, fp32, fp32
precision = [get_rtol_atol("apply_rms_prop", e.dtype) for e in expects]
results = list(map(lambda x, y, p: compare_tensor(x, y, rtol=p[0], atol=p[1]), outputs, expects, precision))
return inputs, outputs, expects, all(results)
def gen_data(shape, dtype, lr, momentum, rho, epsilon):
"""Generates input, output and expect data."""
var = random_gaussian(shape, miu=10, sigma=1.0).astype(dtype)
ms = np.abs(random_gaussian(shape, miu=4, sigma=0.1).astype(dtype))
mom = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
grad = random_gaussian(shape, miu=3, sigma=0.3).astype(dtype)
lr = np.array([lr]).astype(dtype)
momentum = np.array([momentum]).astype(dtype)
rho = np.array([rho]).astype(dtype)
inputs = [var, ms, mom, grad, lr, momentum, rho]
# ms = rho * ms + (1-rho) * grad * grad
# mom = momentum * mom + lr * grad / sqrt(ms + epsilon)
# var = var - mom
one = np.array([1.0]).astype(dtype)
ms_1 = rho * ms
ms_2 = (one - rho) * grad * grad
ms_update = ms_1 + ms_2
mom_1 = momentum * mom
mom_2_1 = lr * grad
mom_2_2 = one / np.sqrt(ms_update + epsilon)
mom_3 = mom_2_1 * mom_2_2
mom_update = mom_1 + mom_3
var_update = var - mom_update
expects = (var_update, var_update.astype("float16"), ms_update, mom_update)
outputs = np.full(var_update.shape, np.nan, "float16")
args = [*inputs, outputs]
return inputs, expects, args
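# Illustrative invocation sketch (hypothetical shape/hyper-parameters; assumes an
# environment where AKG kernels can be built and launched):
#
#   inputs, outputs, expects, ok = apply_rms_prop_mixed_precision_run(
#       (16, 16), "float32", lr=0.01, momentum=0.9, rho=0.9, epsilon=1e-8)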
| 39.384615
| 112
| 0.687826
|
a24269102d87b3908b73b6d7c7cfaa042bd6a825
| 4,982
|
py
|
Python
|
tesp/envs/reset_wrapper.py
|
hyyh28/tesp
|
8109b39011e05545453950c918b14da07e70fad3
|
[
"MIT"
] | 29
|
2019-05-18T12:18:34.000Z
|
2022-03-30T01:46:48.000Z
|
tesp/envs/reset_wrapper.py
|
kivo360/tesp
|
a77d9c228a6891b304e789ba2758a4cbfdb75ec0
|
[
"MIT"
] | 8
|
2019-08-15T05:42:10.000Z
|
2021-05-21T09:41:15.000Z
|
tesp/envs/reset_wrapper.py
|
kivo360/tesp
|
a77d9c228a6891b304e789ba2758a4cbfdb75ec0
|
[
"MIT"
] | 8
|
2019-07-15T22:36:20.000Z
|
2020-08-09T07:03:26.000Z
|
# -*- coding: utf-8 -*-
# @Author : Lin Lan (ryan.linlan@gmail.com)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import gym
import ray
# from ray.experimental import named_actors
class ResetWrapper(gym.Wrapper):
def __init__(self, env, env_config):
assert not isinstance(env, self.__class__)
gym.Wrapper.__init__(self, env)
self.env_config = env_config
self.reset_args_holder = self.env_config.get("reset_args_holder")
# set the following attribute in MAMLPolicyEvaluator.reset_sample
self.with_reset_args = None
@property
def reset_args_config(self):
return self.env.reset_args_config
def sample_reset_args(self, rng, num_train,
num_test_1=None, num_test_2=None):
num_test_1 = num_test_1 or num_train
num_test_2 = num_test_2 or num_train
ret_train, ret_test_1, ret_test_2 = \
self._sample_reset_args_near_and_far(
rng, num_train, num_test_1, num_test_2)
ret_train = np.stack(ret_train)
ret_test_1 = np.stack(ret_test_1)
ret_test_2 = np.stack(ret_test_2)
return ret_train, ret_test_1, ret_test_2
def _sample_reset_args_near_and_far(
self, rng, num_train, num_test_1, num_test_2):
low = self.reset_args_config["low"]
high = self.reset_args_config["high"]
threshold = self.reset_args_config["threshold"]
sample_threshold = self.reset_args_config["sample_threshold"]
sample_func = self.env.sample_reset_args_func(rng, low, high)
ret_train = []
ret_test_1 = []
ret_test_2 = []
while len(ret_train) < num_train:
tmp = sample_func()
if 0.2 < np.linalg.norm(tmp) < threshold:
if not any([np.linalg.norm(tmp - x) < sample_threshold
for x in ret_train]):
ret_train.append(tmp)
while len(ret_test_1) < num_test_1:
tmp = sample_func()
if 0.2 < np.linalg.norm(tmp) < threshold:
if not any([np.linalg.norm(tmp - x) < sample_threshold
for x in ret_test_1 + ret_train]):
ret_test_1.append(tmp)
while len(ret_test_2) < num_test_2:
tmp = sample_func()
if threshold < np.linalg.norm(tmp) < high:
if not any([np.linalg.norm(tmp - x) < sample_threshold
for x in ret_test_2]):
ret_test_2.append(tmp)
return ret_train, ret_test_1, ret_test_2
def _sample_reset_args_left_and_right(
self, rng, num_train, num_test_1, num_test_2):
left_low = [-2.0, -2.0]
left_high = [0.0, 2.0]
right_low = [0.0, -2.0]
right_high = [2.0, 2.0]
left_sample_func = self.env.sample_reset_args_func(
rng, left_low, left_high)
right_sample_func = self.env.sample_reset_args_func(
rng, right_low, right_high)
ret_train = []
ret_test_1 = []
ret_test_2 = []
while len(ret_train) < num_train:
tmp = right_sample_func()
if not any([np.allclose(tmp, x, atol=0.01) for x in ret_train]):
ret_train.append(tmp)
while len(ret_test_1) < num_test_1:
tmp = right_sample_func()
if not any([np.allclose(tmp, x, atol=0.01)
for x in ret_train + ret_test_1]):
ret_test_1.append(tmp)
while len(ret_test_2) < num_test_2:
tmp = left_sample_func()
if not any([np.allclose(tmp, x, atol=0.01) for x in ret_test_2]):
ret_test_2.append(tmp)
return ret_train, ret_test_1, ret_test_2
def reset(self):
# reset_args = ray.get(
# named_actors.get_actor("reset_args").get.remote())
if self.with_reset_args:
this_reset_args = self.reset_args
else:
# reset_args = ray.get(self.reset_args_holder.get.remote())
# this_reset_args = reset_args[self.env_config.worker_index - 1]
this_reset_args = ray.get(
self.reset_args_holder.get_at.remote(
self.env_config.worker_index - 1))
self.reset_args = this_reset_args
self.with_reset_args = True
return self.env.reset(this_reset_args)
def step(self, action):
return self.env.step(action)
@ray.remote(num_cpus=1)
class ResetArgsHolder(object):
def __init__(self, shape):
self.shape = tuple(shape)
self.args = np.zeros(shape)
def get(self):
return self.args
def set(self, args):
assert args.shape == self.shape
self.args = args
def get_at(self, index):
return self.args[index]
# def set_at(self, index, args):
# self.args[index] = args
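# Illustrative wiring sketch (hypothetical base env; env_config is assumed to be
# an RLlib EnvContext-like object, i.e. a dict that also exposes .worker_index):
#
#   holder = ResetArgsHolder.remote((num_workers, 2))    # one reset arg per worker
#   env_config["reset_args_holder"] = holder
#   wrapped = ResetWrapper(base_env, env_config)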
| 37.179104
| 77
| 0.600763
|
5820bdd356cd4a38c3ebb8fba54d07b88eee2fad
| 2,668
|
py
|
Python
|
examples/dfp/v201505/custom_targeting_service/delete_custom_targeting_keys.py
|
wbrp/googleads-python-lib
|
c0f8ce6c4acfe88ce8f913a4f0e0e92b548e1022
|
[
"Apache-2.0"
] | 1
|
2020-05-23T11:32:32.000Z
|
2020-05-23T11:32:32.000Z
|
examples/dfp/v201505/custom_targeting_service/delete_custom_targeting_keys.py
|
cmm08/googleads-python-lib
|
97743df32eff92cf00cb8beaddcda42dfa0a37f4
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201505/custom_targeting_service/delete_custom_targeting_keys.py
|
cmm08/googleads-python-lib
|
97743df32eff92cf00cb8beaddcda42dfa0a37f4
|
[
"Apache-2.0"
] | 2
|
2018-04-20T02:16:33.000Z
|
2020-11-12T20:58:54.000Z
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deletes a custom targeting key by its name.
To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
KEY_NAME = 'INSERT_CUSTOM_TARGETING_KEY_NAME_HERE'
def main(client, key_name):
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201505')
values = [{
'key': 'name',
'value': {
'xsi_type': 'TextValue',
'value': key_name
}
}]
query = 'WHERE name = :name'
statement = dfp.FilterStatement(query, values)
deleted_custom_targeting_keys = 0
# Get custom targeting keys.
while True:
response = custom_targeting_service.getCustomTargetingKeysByStatement(
statement.ToStatement())
if 'results' in response:
key_ids = [key['id'] for key in response['results']]
action = {'xsi_type': 'DeleteCustomTargetingKeys'}
      key_query = 'WHERE id IN (%s)' % ', '.join(str(key_id) for key_id in key_ids)
key_statement = dfp.FilterStatement(key_query)
# Delete custom targeting keys.
result = custom_targeting_service.performCustomTargetingKeyAction(
action, key_statement.ToStatement())
if result and int(result['numChanges']) > 0:
deleted_custom_targeting_keys += int(result['numChanges'])
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
if deleted_custom_targeting_keys > 0:
print ('Number of custom targeting keys deleted: %s'
% deleted_custom_targeting_keys)
else:
    print ('No custom targeting keys were deleted.')
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, KEY_NAME)
| 32.144578
| 77
| 0.723013
|
a083656cb77b94d3f10df276780161dce582fd98
| 3,306
|
py
|
Python
|
src/weconnect/settings/production.py
|
SriSatyaLokesh/weconnect
|
5c97ee9d1207a1a65021b2ea43bb445b01f64af0
|
[
"MIT"
] | null | null | null |
src/weconnect/settings/production.py
|
SriSatyaLokesh/weconnect
|
5c97ee9d1207a1a65021b2ea43bb445b01f64af0
|
[
"MIT"
] | 8
|
2020-06-06T00:38:48.000Z
|
2021-11-15T17:50:37.000Z
|
src/weconnect/settings/production.py
|
SriSatyaLokesh/weconnect
|
5c97ee9d1207a1a65021b2ea43bb445b01f64af0
|
[
"MIT"
] | null | null | null |
"""
Django settings for weconnect project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(^)xpu7m-5-2vm$6h&4t$g2$2!r^5s5**hbsuueb-2lo_61+2f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
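# Example (hypothetical domains): with DEBUG = False, Django only serves requests
# whose Host header matches an entry here, e.g.
#   ALLOWED_HOSTS = ['example.com', 'www.example.com']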
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'rest_framework',
'accounts',
'hashtags',
'tweets',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weconnect.urls'
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = LOGIN_REDIRECT_URL
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weconnect.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 25.236641
| 91
| 0.69389
|
5ea1e603f2a170a01b53a0e1cd6329a6fc138c67
| 1,267
|
py
|
Python
|
test.py
|
idozahavy/NeuralNetwork
|
4a3d66a7fa92afad0dbe4d4fc4ef2dc76b844c3f
|
[
"MIT"
] | 1
|
2020-07-05T13:42:55.000Z
|
2020-07-05T13:42:55.000Z
|
test.py
|
idozahavy/NeuralNetwork
|
4a3d66a7fa92afad0dbe4d4fc4ef2dc76b844c3f
|
[
"MIT"
] | null | null | null |
test.py
|
idozahavy/NeuralNetwork
|
4a3d66a7fa92afad0dbe4d4fc4ef2dc76b844c3f
|
[
"MIT"
] | null | null | null |
from sympy import Symbol
from AILostCause.BetterNode import Node
x = Symbol("x")
y = x ** 2 + 5 * (x+4)
result = y.subs(x, 5)
print(type(result))
print(result)
print(type(y))
print(y)
print(y.diff("x"))
start_node = Node()
second_node = Node()
third_node = Node()
output_node = Node()
output_node.AddInputNode(third_node)
third_node.AddInputNode(second_node)
second_node.AddInputNode(start_node)
start_node.SetActivation(0.1)
output_node.CalcActivation()
print(f"Activation = {output_node.activation}")
formula = output_node.FormulateFactorNodeActivation(second_node.input_links[0])
print(f"first conn formula = {formula}")
diff_start = formula.diff("x")
print(f"diff formula = {diff_start}")
diff_start_5 = diff_start.subs(x, 0.1).evalf()
print(f"diff formula at point 0.1 = {diff_start_5}")
print()
print()
second_node.input_links[0].factor += (10 ** 14)
output_node.CalcActivation(recalculate=True)
print(f"Activation = {output_node.activation}")
formula = output_node.FormulateFactorNodeActivation(second_node.input_links[0])
print(f"first conn formula = {formula}")
diff_start = formula.diff("x")
print(f"diff formula = {diff_start}")
diff_start_5 = diff_start.subs(x, 0.1).evalf()
print(f"diff formula at point 0.1 = {diff_start_5}")
print()
print()
| 24.843137
| 79
| 0.749803
|
5b1a011cce8aa83d478e7ca5e90fddf8b32278b0
| 9,254
|
py
|
Python
|
agent.py
|
fabiocorreacordeiro/Train-a-Smartcab-to-Drive
|
b83ed543141ab9da95cb4c13ed30eedd99ba5674
|
[
"MIT"
] | null | null | null |
agent.py
|
fabiocorreacordeiro/Train-a-Smartcab-to-Drive
|
b83ed543141ab9da95cb4c13ed30eedd99ba5674
|
[
"MIT"
] | null | null | null |
agent.py
|
fabiocorreacordeiro/Train-a-Smartcab-to-Drive
|
b83ed543141ab9da95cb4c13ed30eedd99ba5674
|
[
"MIT"
] | null | null | null |
import random
import math
import numpy as np
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
class LearningAgent(Agent):
""" An agent that learns to drive in the Smartcab world.
This is the object you will be modifying. """
def __init__(self, env, learning=False, epsilon=1.0, alpha=0.5, trial=0.0):
super(LearningAgent, self).__init__(env) # Set the agent in the evironment
self.planner = RoutePlanner(self.env, self) # Create a route planner
self.valid_actions = self.env.valid_actions # The set of valid actions
# Set parameters of the learning agent
self.learning = learning # Whether the agent is expected to learn
self.Q = dict() # Create a Q-table which will be a dictionary of tuples
self.epsilon = epsilon # Random exploration factor
self.alpha = alpha # Learning factor
###########
## TO DO ##
###########
# Set any additional class parameters as needed
self.trial = trial
def reset(self, destination=None, testing=False):
""" The reset function is called at the beginning of each trial.
'testing' is set to True if testing trials are being used
once training trials have completed. """
# Select the destination as the new location to route to
self.planner.route_to(destination)
###########
## TO DO ##
###########
# Update epsilon using a decay function of your choice
# Update additional class parameters as needed
# If 'testing' is True, set epsilon and alpha to 0
if testing:
self.epsilon = 0
else:
# 'Linear'
if self.epsilon > 0.001:
self.epsilon = self.epsilon - 0.001
else:
                self.epsilon = 0
            # Exponential decay (alternative schedules, kept commented out below)
#self.epsilon = self.epsilon * 0.992
#self.trial = self.trial + 1
#self.epsilon = 1.0 / (1 + (self.trial * 0.03) ** 2.0)
#self.epsilon = 1.0 / (self.trial)
# "Cosseno"
#if self.epsilon > 0.01:
# self.trial = self.trial + 1
# self.epsilon = math.cos(0.0015 * self.trial)
# self.epsilon = (math.cos(0.01 * self.trial) + 1) / 2
#else:
# self.epsilon = 0
# "Exp()"
#self.trial = self.trial + 1
#self.epsilon = math.exp(-1 * self.trial)
# Constant
#self.trial = self.trial + 1
#if self.trial > 1000:
# self.epsilon = 0.0
return None
def build_state(self):
""" The build_state function is called when the agent requests data from the
environment. The next waypoint, the intersection inputs, and the deadline
are all features available to the agent. """
# Collect data about the environment
waypoint = self.planner.next_waypoint() # The next waypoint
inputs = self.env.sense(self) # Visual input - intersection light and traffic
deadline = self.env.get_deadline(self) # Remaining deadline
###########
## TO DO ##
###########
# NOTE : you are not allowed to engineer features outside of the inputs available.
# Because the aim of this project is to teach Reinforcement Learning, we have placed
# constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.
# With the hand-engineered features, this learning process gets entirely negated.
# 'Light'
light = inputs['light']
# Car Left
car_left = inputs['left']
# Car Oncoming
car_oncoming = inputs['oncoming']
# Car Right
car_right = inputs['right']
        # Waypoint (already collected above from the planner)
# Set 'state' as a tuple of relevant data for the agent
state = (light, car_left, car_oncoming, car_right, waypoint)
return state
def get_maxQ(self, state):
""" The get_max_Q function is called when the agent is asked to find the
maximum Q-value of all actions based on the 'state' the smartcab is in. """
###########
## TO DO ##
###########
        # Find the action with the maximum Q-value for the given state
        # (argmax over the state's action dict; choose_action uses the result as an action)
        maxQ = max(self.Q[state], key=self.Q[state].get)
return maxQ
def createQ(self, state):
""" The createQ function is called when a state is generated by the agent. """
###########
## TO DO ##
###########
# When learning, check if the 'state' is not in the Q-table
# If it is not, create a new dictionary for that state
# Then, for each action available, set the initial Q-value to 0.0
if state not in self.Q:
self.Q[state] = {None : 0.0, 'left': 0.0, 'forward': 0.0, 'right': 0.0}
return
def choose_action(self, state):
""" The choose_action function is called when the agent is asked to choose
which action to take, based on the 'state' the smartcab is in. """
# Set the agent state and default action
self.state = state
self.next_waypoint = self.planner.next_waypoint()
action = None
###########
## TO DO ##
###########
# When not learning, choose a random action
actions = [None, 'left', 'forward', 'right']
random_action = random.choice(actions)
# When learning, choose a random action with 'epsilon' probability
# Otherwise, choose an action with the highest Q-value for the current state
# Be sure that when choosing an action with highest Q-value that you randomly select between actions that "tie".
highest_Q = self.get_maxQ(state)
action = np.random.choice([random_action, highest_Q], p=[self.epsilon, 1 - self.epsilon])
return action
def learn(self, state, action, reward):
""" The learn function is called after the agent completes an action and
receives a reward. This function does not consider future rewards
when conducting learning. """
###########
## TO DO ##
###########
        # When learning, implement the value iteration update rule
        # Use only the learning rate 'alpha' (do not use the discount factor 'gamma'):
        # Q <- (1 - alpha) * Q + alpha * reward
        self.Q[state][action] = (1 - self.alpha) * self.Q[state][action] + self.alpha * reward
return
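    # Worked example (illustrative numbers): with alpha = 0.5, a stored value of
    # Q[state][action] = 2.0 and an observed reward of 4.0, the update yields
    # (1 - 0.5) * 2.0 + 0.5 * 4.0 = 3.0.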
def update(self):
""" The update function is called when a time step is completed in the
environment for a given trial. This function will build the agent
state, choose an action, receive a reward, and learn if enabled. """
state = self.build_state() # Get current state
self.createQ(state) # Create 'state' in Q-table
action = self.choose_action(state) # Choose an action
reward = self.env.act(self, action) # Receive a reward
self.learn(state, action, reward) # Q-learn
return
def run():
""" Driving function for running the simulation.
Press ESC to close the simulation, or [SPACE] to pause the simulation. """
##############
# Create the environment
# Flags:
# verbose - set to True to display additional output from the simulation
# num_dummies - discrete number of dummy agents in the environment, default is 100
# grid_size - discrete number of intersections (columns, rows), default is (8, 6)
env = Environment(verbose = True)
##############
# Create the driving agent
# Flags:
# learning - set to True to force the driving agent to use Q-learning
# * epsilon - continuous value for the exploration factor, default is 1
# * alpha - continuous value for the learning rate, default is 0.5
agent = env.create_agent(LearningAgent, learning = True, alpha = 0.5, epsilon=1)
##############
# Follow the driving agent
# Flags:
# enforce_deadline - set to True to enforce a deadline metric
env.set_primary_agent(agent, enforce_deadline = True)
##############
# Create the simulation
# Flags:
# update_delay - continuous time (in seconds) between actions, default is 2.0 seconds
# display - set to False to disable the GUI if PyGame is enabled
# log_metrics - set to True to log trial and simulation results to /logs
# optimized - set to True to change the default log file name
sim = Simulator(env, update_delay = 0.001, log_metrics = True, display = False, optimized = True)
##############
# Run the simulation
# Flags:
# tolerance - epsilon tolerance before beginning testing, default is 0.05
# n_test - discrete number of testing trials to perform, default is 0
sim.run(n_test = 100, tolerance = 0.001)
if __name__ == '__main__':
run()
| 36.722222
| 151
| 0.587962
|
95f16a8a12935d0bdf1a11f5b55a59ca9ed3b7bb
| 984
|
py
|
Python
|
site_guitare/urls.py
|
Sieltek/site_guitare
|
36a2d0b10f77d093ad2f210f1d7f5d1442966189
|
[
"MIT"
] | null | null | null |
site_guitare/urls.py
|
Sieltek/site_guitare
|
36a2d0b10f77d093ad2f210f1d7f5d1442966189
|
[
"MIT"
] | 2
|
2021-03-19T08:25:54.000Z
|
2021-03-19T08:30:09.000Z
|
site_guitare/urls.py
|
Sieltek/site_guitare
|
36a2d0b10f77d093ad2f210f1d7f5d1442966189
|
[
"MIT"
] | null | null | null |
"""site_guitare URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('guide_guitare.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
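# A hedged sketch of the three routing styles described in the docstring above.
# The view and module names below (views.home, Home, blog.urls) are hypothetical
# and not part of this project:
#
#   from guide_guitare import views
#   from guide_guitare.views import Home
#   urlpatterns += [
#       path('accueil/', views.home, name='accueil'),   # function view
#       path('home/', Home.as_view(), name='home'),     # class-based view
#       path('blog/', include('blog.urls')),            # nested URLconf
#   ]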
| 36.444444
| 80
| 0.726626
|
7a9e74000e6ff939dcf50bffcfcb0c9017016bfd
| 18,979
|
py
|
Python
|
scripts/wk/os/win.py
|
2Shirt/WizardK
|
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
|
[
"MIT"
] | null | null | null |
scripts/wk/os/win.py
|
2Shirt/WizardK
|
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
|
[
"MIT"
] | 178
|
2017-11-17T19:14:31.000Z
|
2021-12-15T07:43:29.000Z
|
scripts/wk/os/win.py
|
2Shirt/WizardK
|
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
|
[
"MIT"
] | 1
|
2017-11-17T19:32:36.000Z
|
2017-11-17T19:32:36.000Z
|
"""WizardKit: Windows Functions"""
# vim: sts=2 sw=2 ts=2
import ctypes
import logging
import os
import pathlib
import platform
from contextlib import suppress
import psutil
try:
import winreg
except ImportError as err:
if platform.system() == 'Windows':
raise err
from wk.borrowed import acpi
from wk.cfg.main import KIT_NAME_FULL
from wk.cfg.windows_builds import (
OLDEST_SUPPORTED_BUILD,
OUTDATED_BUILD_NUMBERS,
WINDOWS_BUILDS,
)
from wk.exe import get_json_from_command, run_program
from wk.kit.tools import find_kit_dir
from wk.std import (
GenericError,
GenericWarning,
bytes_to_string,
color_string,
sleep,
)
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
ARCH = '64' if platform.architecture()[0] == '64bit' else '32'
CONEMU = 'ConEmuPID' in os.environ
KNOWN_DATA_TYPES = {
'BINARY': winreg.REG_BINARY,
'DWORD': winreg.REG_DWORD,
'DWORD_LITTLE_ENDIAN': winreg.REG_DWORD_LITTLE_ENDIAN,
'DWORD_BIG_ENDIAN': winreg.REG_DWORD_BIG_ENDIAN,
'EXPAND_SZ': winreg.REG_EXPAND_SZ,
'LINK': winreg.REG_LINK,
'MULTI_SZ': winreg.REG_MULTI_SZ,
'NONE': winreg.REG_NONE,
'QWORD': winreg.REG_QWORD,
'QWORD_LITTLE_ENDIAN': winreg.REG_QWORD_LITTLE_ENDIAN,
'SZ': winreg.REG_SZ,
}
KNOWN_HIVES = {
'HKCR': winreg.HKEY_CLASSES_ROOT,
'HKCU': winreg.HKEY_CURRENT_USER,
'HKLM': winreg.HKEY_LOCAL_MACHINE,
'HKU': winreg.HKEY_USERS,
'HKEY_CLASSES_ROOT': winreg.HKEY_CLASSES_ROOT,
'HKEY_CURRENT_USER': winreg.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': winreg.HKEY_LOCAL_MACHINE,
'HKEY_USERS': winreg.HKEY_USERS,
}
KNOWN_HIVE_NAMES = {
winreg.HKEY_CLASSES_ROOT: 'HKCR',
winreg.HKEY_CURRENT_USER: 'HKCU',
winreg.HKEY_LOCAL_MACHINE: 'HKLM',
winreg.HKEY_USERS: 'HKU',
}
OS_VERSION = platform.win32_ver()[0]
OS_VERSION = 8.1 if OS_VERSION == '8.1' else int(OS_VERSION)
RAM_OK = 5.5 * 1024**3 # ~6 GiB assuming a bit of shared memory
RAM_WARNING = 3.5 * 1024**3 # ~4 GiB assuming a bit of shared memory
REG_MSISERVER = r'HKLM\SYSTEM\CurrentControlSet\Control\SafeBoot\Network\MSIServer'
SLMGR = pathlib.Path(f'{os.environ.get("SYSTEMROOT")}/System32/slmgr.vbs')
# Activation Functions
def activate_with_bios():
"""Attempt to activate Windows with a key stored in the BIOS."""
# Code borrowed from https://github.com/aeruder/get_win8key
#####################################################
#script to query windows 8.x OEM key from PC firmware
#ACPI -> table MSDM -> raw content -> byte offset 56 to end
#ck, 03-Jan-2014 (christian@korneck.de)
#####################################################
bios_key = None
table = b"MSDM"
# Check if activation is needed
if is_activated():
raise GenericWarning('System already activated')
# Get BIOS key
if acpi.FindAcpiTable(table) is True:
rawtable = acpi.GetAcpiTable(table)
#http://msdn.microsoft.com/library/windows/hardware/hh673514
#byte offset 36 from beginning
# = Microsoft 'software licensing data structure'
# / 36 + 20 bytes offset from beginning = Win Key
bios_key = rawtable[56:len(rawtable)].decode("utf-8")
if not bios_key:
raise GenericError('BIOS key not found.')
# Install Key
cmd = ['cscript', '//nologo', SLMGR, '/ipk', bios_key]
run_program(cmd, check=False)
sleep(5)
# Attempt activation
cmd = ['cscript', '//nologo', SLMGR, '/ato']
run_program(cmd, check=False)
sleep(5)
# Check status
if not is_activated():
raise GenericError('Activation Failed')
def get_activation_string():
"""Get activation status, returns str."""
cmd = ['cscript', '//nologo', SLMGR, '/xpr']
proc = run_program(cmd, check=False)
act_str = proc.stdout
act_str = act_str.splitlines()[1]
act_str = act_str.strip()
return act_str
def is_activated():
"""Check if Windows is activated via slmgr.vbs and return bool."""
act_str = get_activation_string()
# Check result.
return act_str and 'permanent' in act_str
# Date / Time functions
def get_timezone():
"""Get current timezone using tzutil, returns str."""
cmd = ['tzutil', '/g']
proc = run_program(cmd, check=False)
return proc.stdout
def set_timezone(zone):
"""Set current timezone using tzutil."""
cmd = ['tzutil', '/s', zone]
run_program(cmd, check=False)
# Info Functions
def check_4k_alignment(show_alert=False):
"""Check if all partitions are 4K aligned, returns book."""
cmd = ['WMIC', 'partition', 'get', 'StartingOffset']
# Check offsets
proc = run_program(cmd)
for offset in proc.stdout.splitlines():
offset = offset.strip()
if not offset.isnumeric():
continue
if int(offset) % 4096 != 0:
# Not aligned
if show_alert:
show_alert_box('One or more partitions are not 4K aligned')
raise GenericError('One or more partitions are not 4K aligned')
def get_installed_antivirus():
"""Get list of installed antivirus programs, returns list."""
cmd = [
'WMIC', r'/namespace:\\root\SecurityCenter2',
'path', 'AntivirusProduct',
'get', 'displayName', '/value',
]
products = []
report = []
# Get list of products
proc = run_program(cmd)
for line in proc.stdout.splitlines():
line = line.strip()
if '=' in line:
products.append(line.split('=')[1])
# Check product(s) status
for product in sorted(products):
cmd = [
'WMIC', r'/namespace:\\root\SecurityCenter2',
'path', 'AntivirusProduct',
'where', f'displayName="{product}"',
'get', 'productState', '/value',
]
proc = run_program(cmd)
state = proc.stdout.split('=')[1]
state = hex(int(state))
if str(state)[3:5] not in ['10', '11']:
report.append(color_string(f'[Disabled] {product}', 'YELLOW'))
else:
report.append(product)
# Final check
if not report:
report.append(color_string('No products detected', 'RED'))
# Done
return report
def get_installed_ram(as_list=False, raise_exceptions=False):
"""Get installed RAM."""
mem = psutil.virtual_memory()
mem_str = bytes_to_string(mem.total, decimals=1)
# Raise exception if necessary
if raise_exceptions:
if RAM_OK > mem.total >= RAM_WARNING:
raise GenericWarning(mem_str)
if mem.total < RAM_WARNING:
raise GenericError(mem_str)
# Done
return [mem_str] if as_list else mem_str
def get_os_activation(as_list=False, check=True):
"""Get OS activation status, returns str.
NOTE: If check=True then raise an exception if OS isn't activated.
"""
act_str = get_activation_string()
if check and not is_activated():
if 'unavailable' in act_str.lower():
raise GenericWarning(act_str)
# Else
raise GenericError(act_str)
# Done
return [act_str] if as_list else act_str
def get_os_name(as_list=False, check=True):
"""Build OS display name, returns str.
NOTE: If check=True then an exception is raised if the OS version is
outdated or unsupported.
"""
key = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
build_version = int(reg_read_value("HKLM", key, "CurrentBuild"))
build_version_full = platform.win32_ver()[1]
details = WINDOWS_BUILDS.get(build_version_full, f'Build {build_version}')
display_name = (
f'{reg_read_value("HKLM", key, "ProductName")} {ARCH}-bit {details}'
)
# Check for support issues
if check:
if build_version in OUTDATED_BUILD_NUMBERS:
raise GenericWarning(f'{display_name} (outdated)')
if build_version < OLDEST_SUPPORTED_BUILD:
raise GenericError(f'{display_name} (unsupported)')
# Done
return [display_name] if as_list else display_name
def get_raw_disks():
"""Get all disks without a partiton table, returns list."""
script_path = find_kit_dir('Scripts').joinpath('get_raw_disks.ps1')
cmd = ['PowerShell', '-ExecutionPolicy', 'Bypass', '-File', script_path]
json_data = get_json_from_command(cmd)
raw_disks = []
# Bail early
if not json_data:
# No RAW disks detected
return raw_disks
# Fix JSON if only one disk was detected
if isinstance(json_data, dict):
json_data = [json_data]
# Parse JSON
for disk in json_data:
size_str = bytes_to_string(int(disk["Size"]), use_binary=False)
raw_disks.append(f'{disk["FriendlyName"]} ({size_str})')
# Done
return raw_disks
def get_volume_usage(use_colors=False):
"""Get space usage info for all fixed volumes, returns list."""
report = []
for disk in psutil.disk_partitions():
if 'fixed' not in disk.opts:
continue
total, _, free, percent = psutil.disk_usage(disk.device)
color = None
if percent > 85:
color = 'RED'
elif percent > 75:
color = 'YELLOW'
display_str = (
f'{free/total:>5.2%} Free'
f' ({bytes_to_string(free, 2):>10} / {bytes_to_string(total, 2):>10})'
)
if use_colors:
display_str = color_string(display_str, color)
report.append(f'{disk.device} {display_str}')
# Done
return report
def show_alert_box(message, title=None):
"""Show Windows alert box with message."""
title = title if title else f'{KIT_NAME_FULL} Warning'
message_box = ctypes.windll.user32.MessageBoxW
message_box(None, message, title, 0x00001030)
# Registry Functions
def reg_delete_key(hive, key, recurse=False):
# pylint: disable=raise-missing-from
"""Delete a key from the registry.
NOTE: If recurse is False then it will only work on empty keys.
"""
hive = reg_get_hive(hive)
hive_name = KNOWN_HIVE_NAMES.get(hive, '???')
# Delete subkeys first
if recurse:
with suppress(OSError), winreg.OpenKey(hive, key) as open_key:
while True:
subkey = fr'{key}\{winreg.EnumKey(open_key, 0)}'
reg_delete_key(hive, subkey, recurse=recurse)
# Delete key
try:
winreg.DeleteKey(hive, key)
LOG.warning(r'Deleting registry key: %s\%s', hive_name, key)
except FileNotFoundError:
# Ignore
pass
except PermissionError:
LOG.error(r'Failed to delete registry key: %s\%s', hive_name, key)
if recurse:
# Re-raise exception
raise
# recurse is False, so assume we tried to remove a non-empty key
msg = fr'Refusing to remove non-empty key: {hive_name}\{key}'
raise FileExistsError(msg)
def reg_delete_value(hive, key, value):
"""Delete a value from the registry."""
access = winreg.KEY_ALL_ACCESS
hive = reg_get_hive(hive)
hive_name = KNOWN_HIVE_NAMES.get(hive, '???')
# Delete value
with winreg.OpenKey(hive, key, access=access) as open_key:
try:
winreg.DeleteValue(open_key, value)
LOG.warning(
r'Deleting registry value: %s\%s "%s"', hive_name, key, value,
)
except FileNotFoundError:
# Ignore
pass
except PermissionError:
LOG.error(
r'Failed to delete registry value: %s\%s "%s"', hive_name, key, value,
)
# Re-raise exception
raise
def reg_get_hive(hive):
"""Get winreg HKEY constant from string, returns HKEY constant."""
if isinstance(hive, int):
# Assuming we're already a winreg HKEY constant
pass
else:
hive = KNOWN_HIVES[hive.upper()]
# Done
return hive
def reg_get_data_type(data_type):
"""Get registry data type from string, returns winreg constant."""
if isinstance(data_type, int):
# Assuming we're already a winreg value type constant
pass
else:
data_type = KNOWN_DATA_TYPES[data_type.upper()]
# Done
return data_type
def reg_key_exists(hive, key):
"""Test if the specified hive/key exists, returns bool."""
exists = False
hive = reg_get_hive(hive)
# Query key
try:
winreg.QueryValue(hive, key)
except FileNotFoundError:
# Leave set to False
pass
else:
exists = True
# Done
return exists
def reg_read_value(hive, key, value, force_32=False, force_64=False):
"""Query value from hive/hey, returns multiple types.
NOTE: Set value='' to read the default value.
"""
access = winreg.KEY_READ
data = None
hive = reg_get_hive(hive)
# Set access
if force_32:
access = access | winreg.KEY_WOW64_32KEY
elif force_64:
access = access | winreg.KEY_WOW64_64KEY
# Query value
with winreg.OpenKey(hive, key, access=access) as open_key:
# Return the first part of the tuple, ignoring the value type
data = winreg.QueryValueEx(open_key, value)[0]
# Done
return data
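# A hedged usage sketch for reg_read_value; the key and value name below are
# the ones queried elsewhere in this module and may differ per machine:
#
#   build = reg_read_value(
#       'HKLM', r'SOFTWARE\Microsoft\Windows NT\CurrentVersion', 'CurrentBuild')
#
# Passing value='' reads the key's default value instead, as noted above.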
def reg_write_settings(settings):
"""Set registry values in bulk from a custom data structure.
Data structure should be as follows:
EXAMPLE_SETTINGS = {
# See KNOWN_HIVES for valid hives
'HKLM': {
r'Software\\2Shirt\\WizardKit': (
# Value tuples should be in the form:
# (name, data, data-type, option),
# See KNOWN_DATA_TYPES for valid types
# The option item is optional
('Sample Value #1', 'Sample Data', 'SZ'),
('Sample Value #2', 14, 'DWORD'),
),
# An empty key will be created if no values are specified
r'Software\\2Shirt\\WizardKit\\Empty': (),
r'Software\\2Shirt\\WizardKit\\Test': (
('Sample Value #3', 14000000000000, 'QWORD'),
),
},
'HKCU': {
r'Software\\2Shirt\\WizardKit': (
# The 4th item forces using the 32-bit registry
# See reg_set_value() for valid options
('Sample Value #4', 'Sample Data', 'SZ', '32'),
),
},
}
"""
for hive, keys in settings.items():
hive = reg_get_hive(hive)
for key, values in keys.items():
if not values:
# Create an empty key
winreg.CreateKey(hive, key)
for value in values:
reg_set_value(hive, key, *value)
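# A hedged usage sketch for reg_write_settings, mirroring the EXAMPLE_SETTINGS
# structure documented above (the key path and values are illustrative only):
#
#   reg_write_settings({
#       'HKCU': {
#           r'Software\2Shirt\WizardKit': (
#               ('Sample Value #1', 'Sample Data', 'SZ'),
#           ),
#       },
#   })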
def reg_set_value(hive, key, name, data, data_type, option=None):
# pylint: disable=too-many-arguments
"""Set value for hive/key."""
access = winreg.KEY_WRITE
data_type = reg_get_data_type(data_type)
hive = reg_get_hive(hive)
option = str(option)
# Safety check
if not name and option in ('32', '64'):
raise NotImplementedError(
'Unable to set default values using alternate registry views',
)
# Set access
if option == '32':
access = access | winreg.KEY_WOW64_32KEY
elif option == '64':
access = access | winreg.KEY_WOW64_64KEY
# Create key
winreg.CreateKeyEx(hive, key, access=access)
# Set value
if name:
with winreg.OpenKey(hive, key, access=access) as open_key:
winreg.SetValueEx(open_key, name, 0, data_type, data)
else:
# Set default value instead
winreg.SetValue(hive, key, data_type, data)
# Safe Mode Functions
def disable_safemode():
"""Edit BCD to remove safeboot value."""
cmd = ['bcdedit', '/deletevalue', '{default}', 'safeboot']
run_program(cmd)
def disable_safemode_msi():
"""Disable MSI access under safemode."""
cmd = ['reg', 'delete', REG_MSISERVER, '/f']
run_program(cmd)
def enable_safemode():
"""Edit BCD to set safeboot as default."""
cmd = ['bcdedit', '/set', '{default}', 'safeboot', 'network']
run_program(cmd)
def enable_safemode_msi():
"""Enable MSI access under safemode."""
cmd = ['reg', 'add', REG_MSISERVER, '/f']
run_program(cmd)
cmd = [
'reg', 'add', REG_MSISERVER, '/ve',
'/t', 'REG_SZ',
'/d', 'Service', '/f',
]
run_program(cmd)
# Secure Boot Functions
def is_booted_uefi():
"""Check if booted UEFI or legacy, returns bool."""
kernel = ctypes.windll.kernel32
firmware_type = ctypes.c_uint()
# Get value from kernel32 API (firmware_type is updated by the call)
try:
kernel.GetFirmwareType(ctypes.byref(firmware_type))
except Exception: # pylint: disable=broad-except
# Ignore and set firmware_type back to zero
firmware_type = ctypes.c_uint(0)
# Check result
return firmware_type.value == 2
def is_secure_boot_enabled(raise_exceptions=False, show_alert=False):
"""Check if Secure Boot is enabled, returns bool.
If raise_exceptions is True then an exception is raised with details.
If show_alert is True a popup alert box is shown if it's not enabled.
"""
booted_uefi = is_booted_uefi()
cmd = ['PowerShell', '-Command', 'Confirm-SecureBootUEFI']
enabled = False
msg_error = None
msg_warning = None
# Bail early
if OS_VERSION < 8:
if raise_exceptions:
raise GenericWarning(f'Secure Boot not available for {OS_VERSION}')
return False
# Check results
proc = run_program(cmd, check=False)
if proc.returncode:
# Something went wrong
if booted_uefi:
msg_warning = 'UNKNOWN'
else:
msg_warning = 'DISABLED\n\nOS installed LEGACY'
else:
# Command completed
if 'True' in proc.stdout:
enabled = True
elif 'False' in proc.stdout:
msg_error = 'DISABLED'
else:
msg_warning = 'UNKNOWN'
# Show popup and/or raise exceptions as necessary
for msg, exc in ((msg_error, GenericError), (msg_warning, GenericWarning)):
if not msg:
continue
if show_alert:
show_alert_box(f'Secure Boot {msg}')
if raise_exceptions:
raise exc(msg)
break
# Done
return enabled
# Service Functions
def disable_service(service_name):
"""Set service startup to disabled."""
cmd = ['sc', 'config', service_name, 'start=', 'disabled']
run_program(cmd, check=False)
# Verify service was disabled
if get_service_start_type(service_name) != 'disabled':
raise GenericError(f'Failed to disable service {service_name}')
def enable_service(service_name, start_type='auto'):
"""Enable service by setting start type."""
cmd = ['sc', 'config', service_name, 'start=', start_type]
psutil_type = 'automatic'
if start_type == 'demand':
psutil_type = 'manual'
# Enable service
run_program(cmd, check=False)
# Verify service was enabled
if get_service_start_type(service_name) != psutil_type:
raise GenericError(f'Failed to enable service {service_name}')
def get_service_status(service_name):
"""Get service status using psutil, returns str."""
status = 'unknown'
try:
service = psutil.win_service_get(service_name)
status = service.status()
except psutil.NoSuchProcess:
status = 'missing?'
return status
def get_service_start_type(service_name):
"""Get service startup type using psutil, returns str."""
start_type = 'unknown'
try:
service = psutil.win_service_get(service_name)
start_type = service.start_type()
except psutil.NoSuchProcess:
start_type = 'missing?'
return start_type
def start_service(service_name):
"""Stop service."""
cmd = ['net', 'start', service_name]
run_program(cmd, check=False)
# Verify service was started
if get_service_status(service_name) not in ('running', 'start_pending'):
raise GenericError(f'Failed to start service {service_name}')
def stop_service(service_name):
"""Stop service."""
cmd = ['net', 'stop', service_name]
run_program(cmd, check=False)
# Verify service was stopped
if get_service_status(service_name) != 'stopped':
raise GenericError(f'Failed to stop service {service_name}')
if __name__ == '__main__':
print("This file is not meant to be called directly.")
| 27.465991
| 83
| 0.678592
|
5f53e188c48749de05414dbf8bf2a24e37a1553d
| 108
|
py
|
Python
|
python_exercises/27Local_Library/library/catalog/urls.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_exercises/27Local_Library/library/catalog/urls.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
python_exercises/27Local_Library/library/catalog/urls.py
|
Matheus-IT/lang-python-related
|
dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
]
| 13.5
| 38
| 0.657407
|
7f54cc5591bcfe88f86178327f5745a208117ea7
| 479
|
py
|
Python
|
example/example/migrations/0001_initial.py
|
ebanalyse/django-nested-form-field
|
5872f2a60676948d4d54332e30e01b46536af323
|
[
"MIT"
] | 1
|
2021-05-25T20:41:38.000Z
|
2021-05-25T20:41:38.000Z
|
example/example/migrations/0001_initial.py
|
ebanalyse/django-nested-form-field
|
5872f2a60676948d4d54332e30e01b46536af323
|
[
"MIT"
] | 1
|
2021-05-25T21:00:09.000Z
|
2021-05-25T21:00:09.000Z
|
example/example/migrations/0001_initial.py
|
nielslerches/django-nested-form-field
|
5872f2a60676948d4d54332e30e01b46536af323
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-22 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nested_form', models.JSONField()),
],
),
]
| 21.772727
| 114
| 0.569937
|
0a90ebe6373572111d488cc103024c637cf0733a
| 2,432
|
py
|
Python
|
docs/src/headers_example.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
docs/src/headers_example.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
docs/src/headers_example.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import inspect
from typing import Any, Mapping, Optional
from di import Container, Dependant, Depends
class Request:
def __init__(self, headers: Mapping[str, str]) -> None:
self.headers = {k.lower(): v for k, v in headers.items()}
class HeaderDependant(Dependant[Any]):
def __init__(self, alias: Optional[str]) -> None:
self.alias = alias
super().__init__(call=None, scope=None, share=False)
def register_parameter(self, param: inspect.Parameter) -> HeaderDependant:
if self.alias is not None:
name = self.alias
else:
name = param.name.replace("_", "-")
def get_header(request: Request = Depends()) -> str:
return param.annotation(request.headers[name])
self.call = get_header
# We could return a copy here to allow the same Dependant
# to be used in multiple places like
# dep = HeaderDependant(...)
# def func1(abcd = dep): ...
# def func2(efgh = dep): ...
# In this scenario, `dep` would be modified in func2 to set
# the header name to "efgh", which leads to incorrect results in func1
# The solution is to return a copy here instead of self, so that
# the original instance is never modified in place
return self
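# A hedged sketch of the copy-based variant described in the comment above;
# this is not what the example does (it returns self), only an illustration:
#
#   def register_parameter(self, param: inspect.Parameter) -> "HeaderDependant":
#       new = HeaderDependant(alias=self.alias)
#       ...  # build get_header as above, assign it to new.call
#       return new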
def Header(alias: Optional[str] = None) -> Any:
return HeaderDependant(alias=alias) # type: ignore
async def web_framework() -> None:
container = Container()
valid_request = Request(headers={"x-header-one": "one", "x-header-two": "2"})
with container.bind(Dependant(lambda: valid_request), Request):
await container.execute_async(container.solve(Dependant(controller))) # success
invalid_request = Request(headers={"x-header-one": "one"})
with container.bind(Dependant(lambda: invalid_request), Request):
try:
await container.execute_async(
container.solve(Dependant(controller))
) # fails
except KeyError:
pass
else:
raise AssertionError(
"This call should have failed because x-header-two is missing"
)
def controller(
x_header_one: str = Header(), header_two_val: int = Header(alias="x-header-two")
) -> None:
"""This is the only piece of user code"""
assert x_header_one == "one"
assert header_two_val == 2
| 33.777778
| 88
| 0.634457
|
d59507466611ef36fc541b5b883820fd7d545d69
| 1,283
|
py
|
Python
|
tensorflow_manopt/manifolds/__init__.py
|
nagachika/tensorflow-manopt
|
736e300686624b14a1697fd6ed06d4344f94fe90
|
[
"MIT"
] | null | null | null |
tensorflow_manopt/manifolds/__init__.py
|
nagachika/tensorflow-manopt
|
736e300686624b14a1697fd6ed06d4344f94fe90
|
[
"MIT"
] | null | null | null |
tensorflow_manopt/manifolds/__init__.py
|
nagachika/tensorflow-manopt
|
736e300686624b14a1697fd6ed06d4344f94fe90
|
[
"MIT"
] | null | null | null |
from tensorflow_manopt.manifolds.cholesky import Cholesky
from tensorflow_manopt.manifolds.euclidean import Euclidean
from tensorflow_manopt.manifolds.grassmannian import Grassmannian
from tensorflow_manopt.manifolds.hyperboloid import Hyperboloid
from tensorflow_manopt.manifolds.manifold import Manifold
from tensorflow_manopt.manifolds.poincare import Poincare
from tensorflow_manopt.manifolds.product import Product
from tensorflow_manopt.manifolds.special_orthogonal import SpecialOrthogonal
from tensorflow_manopt.manifolds.sphere import Sphere
from tensorflow_manopt.manifolds.stiefel import StiefelCanonical
from tensorflow_manopt.manifolds.stiefel import StiefelCayley
from tensorflow_manopt.manifolds.stiefel import StiefelEuclidean
from tensorflow_manopt.manifolds.symmetric_positive import SPDAffineInvariant
from tensorflow_manopt.manifolds.symmetric_positive import SPDLogCholesky
from tensorflow_manopt.manifolds.symmetric_positive import SPDLogEuclidean
__all__ = [
"Cholesky",
"Euclidean",
"Grassmannian",
"Hyperboloid",
"Manifold",
"Poincare",
"Product",
"SPDAffineInvariant",
"SPDLogCholesky",
"SPDLogEuclidean",
"SpecialOrthogonal",
"Sphere",
"StiefelCanonical",
"StiefelCayley",
"StiefelEuclidean",
]
| 37.735294
| 77
| 0.830086
|
aef4b325cc0df7478a6378296be4c00f847f1872
| 2,749
|
py
|
Python
|
src/nodemgr/vrouter_nodemgr/haproxy_stats.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | 1
|
2019-01-11T06:16:10.000Z
|
2019-01-11T06:16:10.000Z
|
src/nodemgr/vrouter_nodemgr/haproxy_stats.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | null | null | null |
src/nodemgr/vrouter_nodemgr/haproxy_stats.py
|
sagarc-contrail/contrail-controller
|
834302367f3ff81f1ce93f4036b6b3788dfd6994
|
[
"Apache-2.0"
] | 18
|
2017-01-12T09:28:44.000Z
|
2019-04-18T20:47:42.000Z
|
import os
import socket
import sys
import csv
HAPROXY_DIR = '/var/lib/contrail/loadbalancer/haproxy/'
STATS_MAP = {
'active_connections': 'qcur',
'max_connections': 'qmax',
'current_sessions': 'scur',
'max_sessions': 'smax',
'total_sessions': 'stot',
'bytes_in': 'bin',
'bytes_out': 'bout',
'connection_errors': 'econ',
'response_errors': 'eresp',
'status': 'status',
'health': 'check_status',
'failed_checks': 'chkfail'
}
# 1 + 2 + 4 = 7 for frontend + backend + server
REQUEST_TYPE = 7
# response types
TYPE_FRONTEND_RESPONSE = '0'
TYPE_BACKEND_RESPONSE = '1'
TYPE_SERVER_RESPONSE = '2'
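# A brief illustration of the bitmask noted above: per this module's comment,
# 1 selects frontends, 2 backends and 4 servers, so requesting all three is
#   REQUEST_TYPE == 1 | 2 | 4 == 7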
class HaproxyStats(object):
def __init__(self):
self.lbaas_dir = HAPROXY_DIR
def get_stats(self, pool_id):
sock_path = os.path.join(self.lbaas_dir, pool_id, 'haproxy.sock')
if not os.path.exists(sock_path):
sys.stderr.write('\nStats socket not found for pool ' + pool_id)
return {}
lb_stats = {}
lb_stats.setdefault('listener', [])
lb_stats.setdefault('pool', [])
lb_stats.setdefault('member', [])
raw_stats = self._read_stats(sock_path)
row_count = 0
for row in csv.DictReader(raw_stats.lstrip('# ').splitlines()):
row_count = row_count + 1
if row.get('type') == TYPE_FRONTEND_RESPONSE:
lb_stats['listener'].append(self._get_stats(row, row['pxname']))
elif row.get('type') == TYPE_BACKEND_RESPONSE:
lb_stats['pool'].append(self._get_stats(row, row['pxname']))
elif row.get('type') == TYPE_SERVER_RESPONSE:
lb_stats['member'].append(self._get_stats(row, row['svname']))
if (row_count == 0):
return {}
return lb_stats
def _get_stats(self, row, name):
stats = dict((k, row.get(v, ''))
for k, v in STATS_MAP.items())
stats['name'] = name
stats['vrouter'] = socket.gethostname()
if stats['status'] in ['no check', 'UP', 'OPEN']:
stats['status'] = 'ACTIVE'
else:
stats['status'] = 'DOWN'
return stats
def _read_stats(self, socket_path):
raw_stats = ''
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
s.send('show stat -1 %s -1\n' % REQUEST_TYPE)
chunk_size = 1024
while True:
chunk = s.recv(chunk_size)
raw_stats += chunk
if len(chunk) < chunk_size:
break
except socket.error as e:
sys.stderr.write('\nstats socket error: ' + str(e))
return raw_stats
| 31.597701
| 80
| 0.568934
|
9cea9ff70578c958a89fea1bec192e644e5ec0af
| 50,445
|
py
|
Python
|
src/python/tests/core/bot/fuzzers/libFuzzer/engine_test.py
|
Montana/clusterfuzz
|
fd2b0be7d680d238e5a426aa061a8669dd63f7cc
|
[
"Apache-2.0"
] | 1
|
2021-02-03T18:12:57.000Z
|
2021-02-03T18:12:57.000Z
|
src/python/tests/core/bot/fuzzers/libFuzzer/engine_test.py
|
Montana/clusterfuzz
|
fd2b0be7d680d238e5a426aa061a8669dd63f7cc
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/bot/fuzzers/libFuzzer/engine_test.py
|
Montana/clusterfuzz
|
fd2b0be7d680d238e5a426aa061a8669dd63f7cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for libFuzzer engine."""
# pylint: disable=unused-argument
from future import standard_library
standard_library.install_aliases()
import os
import shutil
import tempfile
import unittest
import mock
import parameterized
import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
import six
from bot.fuzzers import engine_common
from bot.fuzzers import libfuzzer
from bot.fuzzers import strategy_selection
from bot.fuzzers import utils as fuzzer_utils
from bot.fuzzers.libFuzzer import constants
from bot.fuzzers.libFuzzer import engine
from build_management import build_manager
from fuzzing import strategy
from metrics import logs
from platforms import android
from system import environment
from system import new_process
from system import shell
from tests.test_libs import android_helpers
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
try:
from shlex import quote
except ImportError:
from pipes import quote
TEST_PATH = os.path.abspath(os.path.dirname(__file__))
TEST_DIR = os.path.join(TEST_PATH, 'libfuzzer_test_data')
TEMP_DIR = os.path.join(TEST_PATH, 'temp')
DATA_DIR = os.path.join(TEST_PATH, 'data')
ANDROID_DATA_DIR = os.path.join(DATA_DIR, 'android')
_get_directory_file_count_orig = shell.get_directory_file_count
class PrepareTest(fake_fs_unittest.TestCase):
"""Prepare() tests."""
def setUp(self):
# Set up fake filesystem.
test_helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
test_helpers.patch(self, [
'bot.fuzzers.engine_common.unpack_seed_corpus_if_needed',
])
self.fs.create_dir('/inputs')
self.fs.create_file('/path/target')
self.fs.create_file('/path/blah.dict')
self.fs.create_file('/path/target_seed_corpus.zip')
self.fs.create_file(
'/path/target.options',
contents=('[libfuzzer]\n'
'max_len=31337\n'
'timeout=11\n'
'dict=blah.dict\n'))
os.environ['FAIL_RETRIES'] = '1'
os.environ['FUZZ_INPUTS_DISK'] = '/inputs'
test_helpers.patch(self, ['bot.fuzzers.libfuzzer.pick_strategies'])
self.mock.pick_strategies.return_value = libfuzzer.StrategyInfo(
fuzzing_strategies=[
'unknown_1', 'value_profile', 'corpus_subset_20', 'fork_2'
],
arguments=['-arg1'],
additional_corpus_dirs=['/new_corpus_dir'],
extra_env={'extra_env': '1'},
use_dataflow_tracing=False,
is_mutations_run=True)
def test_prepare(self):
"""Test prepare."""
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
self.assertEqual('/corpus_dir', options.corpus_dir)
six.assertCountEqual(self, [
'-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1',
'-dict=/path/blah.dict'
], options.arguments)
self.assertDictEqual({
'value_profile': 1,
'corpus_subset': 20,
'fork': 2
}, options.strategies)
six.assertCountEqual(self, ['/new_corpus_dir', '/corpus_dir'],
options.fuzz_corpus_dirs)
self.assertDictEqual({'extra_env': '1'}, options.extra_env)
self.assertFalse(options.use_dataflow_tracing)
self.assertTrue(options.is_mutations_run)
self.mock.unpack_seed_corpus_if_needed.assert_called_with(
'/path/target', '/corpus_dir')
def test_prepare_invalid_dict(self):
"""Test prepare with an invalid dict path."""
with open('/path/target.options', 'w') as f:
f.write('[libfuzzer]\n'
'max_len=31337\n'
'timeout=11\n'
'dict=not_exist.dict\n')
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
six.assertCountEqual(
self, ['-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1'],
options.arguments)
def test_prepare_auto_add_dict(self):
"""Test prepare automatically adding dict argument."""
with open('/path/target.options', 'w') as f:
f.write('[libfuzzer]\n' 'max_len=31337\n' 'timeout=11\n')
self.fs.create_file('/path/target.dict')
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
six.assertCountEqual(self, [
'-max_len=31337', '-timeout=11', '-rss_limit_mb=2560', '-arg1',
'-dict=/path/target.dict'
], options.arguments)
class PickStrategiesTest(fake_fs_unittest.TestCase):
"""pick_strategies tests."""
def setUp(self):
test_helpers.patch(self, ['random.SystemRandom.randint'])
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/path/corpus')
self.fs.create_file('/path/target')
def test_max_length_strategy_with_override(self):
"""Tests max length strategy with override."""
strategy_pool = set_strategy_pool([strategy.RANDOM_MAX_LENGTH_STRATEGY])
strategy_info = libfuzzer.pick_strategies(strategy_pool, '/path/target',
'/path/corpus', ['-max_len=100'])
six.assertCountEqual(self, [], strategy_info.arguments)
def test_max_length_strategy_without_override(self):
"""Tests max length strategy without override."""
self.mock.randint.return_value = 1337
strategy_pool = set_strategy_pool([strategy.RANDOM_MAX_LENGTH_STRATEGY])
strategy_info = libfuzzer.pick_strategies(strategy_pool, '/path/target',
'/path/corpus', [])
six.assertCountEqual(self, ['-max_len=1337'], strategy_info.arguments)
class FuzzTest(fake_fs_unittest.TestCase):
"""Fuzz() tests."""
def setUp(self):
# Set up fake filesystem.
test_helpers.patch_environ(self)
test_utils.set_up_pyfakefs(self)
self.fs.create_dir('/corpus')
self.fs.create_dir('/fuzz-inputs')
self.fs.create_dir('/fake')
self.fs.create_file('/target')
self.fs.add_real_directory(TEST_DIR)
test_helpers.patch(self, [
'bot.fuzzers.libFuzzer.engine._is_multistep_merge_supported',
'bot.fuzzers.libfuzzer.LibFuzzerRunner.fuzz',
'bot.fuzzers.libfuzzer.LibFuzzerRunner.merge',
'os.getpid',
])
os.environ['JOB_NAME'] = 'libfuzzer_asan_job'
os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs'
self.mock._is_multistep_merge_supported = True # pylint: disable=protected-access
self.mock.getpid.return_value = 9001
self.maxDiff = None # pylint: disable=invalid-name
def test_fuzz(self):
"""Test fuzz."""
engine_impl = engine.LibFuzzerEngine()
options = engine.LibFuzzerOptions(
'/corpus',
['-arg=1', '-timeout=123', '-dict=blah.dict', '-max_len=9001'], [],
['/corpus'], {}, False, False)
with open(os.path.join(TEST_DIR, 'crash.txt')) as f:
fuzz_output = f.read()
def mock_fuzz(*args, **kwargs): # pylint: disable=unused-argument
"""Mock fuzz."""
self.fs.create_file('/fuzz-inputs/temp-9001/new/A')
self.fs.create_file('/fuzz-inputs/temp-9001/new/B')
return new_process.ProcessResult(
command='command',
return_code=0,
output=fuzz_output,
time_executed=2.0,
timed_out=False)
# Record the merge calls manually as the mock module duplicates the second
# call and overwrites the first call arguments.
mock_merge_calls = []
def mock_merge(*args, **kwargs): # pylint: disable=unused-argument
"""Mock merge."""
mock_merge_calls.append(self.mock.merge.mock_calls[-1])
self.assertTrue(len(mock_merge_calls) <= 2)
merge_output_file = 'merge_step_%d.txt' % len(mock_merge_calls)
with open(os.path.join(TEST_DIR, merge_output_file)) as f:
merge_output = f.read()
self.fs.create_file('/fuzz-inputs/temp-9001/merge-corpus/A')
return new_process.ProcessResult(
command='merge-command',
return_code=0,
output=merge_output,
time_executed=2.0,
timed_out=False)
self.mock.fuzz.side_effect = mock_fuzz
self.mock.merge.side_effect = mock_merge
result = engine_impl.fuzz('/target', options, '/fake', 3600)
self.assertEqual(1, len(result.crashes))
self.assertEqual(fuzz_output, result.logs)
crash = result.crashes[0]
self.assertEqual('/fake/crash-1e15825e6f0b2240a5af75d84214adda1b6b5340',
crash.input_path)
self.assertEqual(fuzz_output, crash.stacktrace)
six.assertCountEqual(self, ['-arg=1', '-timeout=60'], crash.reproduce_args)
self.assertEqual(2, crash.crash_time)
self.mock.fuzz.assert_called_with(
mock.ANY, ['/fuzz-inputs/temp-9001/new', '/corpus'],
additional_args=[
'-arg=1',
'-timeout=123',
'-dict=blah.dict',
'-max_len=9001',
],
artifact_prefix='/fake',
extra_env={},
fuzz_timeout=1470.0)
self.assertEqual(2, len(mock_merge_calls))
# Main things to test are:
# 1) The new corpus directory is used in the second call only.
# 2) The merge control file is explicitly specified for both calls.
mock_merge_calls[0].assert_called_with(
mock.ANY, [
'/fuzz-inputs/temp-9001/merge-corpus',
'/corpus',
],
additional_args=[
'-arg=1',
'-timeout=123',
'-merge_control_file=/fuzz-inputs/temp-9001/merge-workdir/MCF',
],
artifact_prefix=None,
merge_timeout=1800.0,
tmp_dir='/fuzz-inputs/temp-9001/merge-workdir')
mock_merge_calls[1].assert_called_with(
mock.ANY, [
'/fuzz-inputs/temp-9001/merge-corpus',
'/corpus',
'/fuzz-inputs/temp-9001/new',
],
additional_args=[
'-arg=1',
'-timeout=123',
'-merge_control_file=/fuzz-inputs/temp-9001/merge-workdir/MCF',
],
artifact_prefix=None,
merge_timeout=1800.0,
tmp_dir='/fuzz-inputs/temp-9001/merge-workdir')
self.assertDictEqual({
'actual_duration': 2,
'average_exec_per_sec': 21,
'bad_instrumentation': 0,
'corpus_crash_count': 0,
'corpus_size': 0,
'crash_count': 1,
'dict_used': 1,
'edge_coverage': 411,
'edges_total': 398467,
'expected_duration': 1450,
'feature_coverage': 1873,
'fuzzing_time_percent': 0.13793103448275862,
'initial_edge_coverage': 410,
'initial_feature_coverage': 1869,
'leak_count': 0,
'log_lines_from_engine': 2,
'log_lines_ignored': 67,
'log_lines_unwanted': 0,
'manual_dict_size': 0,
'max_len': 9001,
'merge_edge_coverage': 0,
'new_edges': 1,
'new_features': 4,
'new_units_added': 1,
'new_units_generated': 0,
'number_of_executed_units': 1249,
'oom_count': 0,
'peak_rss_mb': 1197,
'recommended_dict_size': 0,
'slow_unit_count': 0,
'slow_units_count': 0,
'slowest_unit_time_sec': 0,
'startup_crash_count': 0,
'strategy_corpus_mutations_ml_rnn': 0,
'strategy_corpus_mutations_radamsa': 0,
'strategy_corpus_subset': 0,
'strategy_dataflow_tracing': 0,
'strategy_fork': 0,
'strategy_mutator_plugin': 0,
'strategy_mutator_plugin_radamsa': 0,
'strategy_random_max_len': 0,
'strategy_recommended_dict': 0,
'strategy_selection_method': 'default',
'strategy_value_profile': 0,
'timeout_count': 0,
'timeout_limit': 123,
}, result.stats)
def set_strategy_pool(strategies=None):
"""Helper method to create instances of strategy pools
for patching use."""
strategy_pool = strategy_selection.StrategyPool()
if strategies is not None:
for strategy_tuple in strategies:
strategy_pool.add_strategy(strategy_tuple)
return strategy_pool
def mock_random_choice(seq):
"""Always returns first element from the sequence."""
# We could try to mock a particular |seq| to be a list with a single element,
# but it does not work well, as random_choice returns a 'mock.mock.MagicMock'
# object that behaves differently from the actual type of |seq[0]|.
return seq[0]
def clear_temp_dir():
"""Clear temp directory."""
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
os.mkdir(TEMP_DIR)
def setup_testcase_and_corpus(testcase, corpus):
"""Setup testcase and corpus."""
clear_temp_dir()
copied_testcase_path = os.path.join(TEMP_DIR, testcase)
shutil.copy(os.path.join(DATA_DIR, testcase), copied_testcase_path)
copied_corpus_path = os.path.join(TEMP_DIR, corpus)
src_corpus_path = os.path.join(DATA_DIR, corpus)
if os.path.exists(src_corpus_path):
shutil.copytree(src_corpus_path, copied_corpus_path)
else:
os.mkdir(copied_corpus_path)
return copied_testcase_path, copied_corpus_path
def get_fuzz_timeout(fuzz_time):
"""Return timeout for fuzzing."""
return (fuzz_time + libfuzzer.LibFuzzerCommon.LIBFUZZER_CLEAN_EXIT_TIME +
libfuzzer.LibFuzzerCommon.SIGTERM_WAIT_TIME)
def mock_get_directory_file_count(dir_path):
"""Mocked version, always return 1 for new testcases directory."""
if dir_path == os.path.join(fuzzer_utils.get_temp_dir(), 'new'):
return 1
return _get_directory_file_count_orig(dir_path)
class BaseIntegrationTest(unittest.TestCase):
"""Base integration tests."""
def setUp(self):
self.maxDiff = None # pylint: disable=invalid-name
test_helpers.patch_environ(self)
os.environ['BUILD_DIR'] = DATA_DIR
os.environ['FAIL_RETRIES'] = '1'
os.environ['FUZZ_INPUTS_DISK'] = TEMP_DIR
os.environ['FUZZ_TEST_TIMEOUT'] = '4800'
os.environ['JOB_NAME'] = 'libfuzzer_asan'
os.environ['INPUT_DIR'] = TEMP_DIR
test_helpers.patch(self, [
'bot.fuzzers.engine_common.get_merge_timeout',
'bot.fuzzers.engine_common.random_choice',
'bot.fuzzers.mutator_plugin._download_mutator_plugin_archive',
'bot.fuzzers.mutator_plugin._get_mutator_plugins_from_bucket',
'bot.fuzzers.strategy_selection.generate_weighted_strategy_pool',
'bot.fuzzers.libfuzzer.get_dictionary_analysis_timeout',
'bot.fuzzers.libfuzzer.get_fuzz_timeout',
'os.getpid',
'system.minijail.MinijailChroot._mknod',
])
self.mock.getpid.return_value = 1337
self.mock._get_mutator_plugins_from_bucket.return_value = [] # pylint: disable=protected-access
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool()
self.mock.get_dictionary_analysis_timeout.return_value = 5
self.mock.get_merge_timeout.return_value = 10
self.mock.random_choice.side_effect = mock_random_choice
@test_utils.integration
class IntegrationTests(BaseIntegrationTest):
"""Base libFuzzer libfuzzer tests."""
def setUp(self):
BaseIntegrationTest.setUp(self)
self.crash_dir = TEMP_DIR
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Compare expected arguments."""
self.assertListEqual(actual,
[target_path] + arguments + corpora_or_testcase)
def assert_has_stats(self, stats):
"""Asserts that libFuzzer stats are in output."""
self.assertIn('number_of_executed_units', stats)
self.assertIn('average_exec_per_sec', stats)
self.assertIn('new_units_added', stats)
self.assertIn('slowest_unit_time_sec', stats)
self.assertIn('peak_rss_mb', stats)
def test_single_testcase_crash(self):
"""Tests libfuzzer with a crashing testcase."""
testcase_path, _ = setup_testcase_and_corpus('crash', 'empty_corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
result = engine_impl.reproduce(target_path, testcase_path,
['-timeout=60', '-rss_limit_mb=2560'], 65)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'),
['-timeout=60', '-rss_limit_mb=2560', '-runs=100'], [testcase_path],
result.command)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
@test_utils.slow
def test_fuzz_no_crash(self):
"""Tests fuzzing (no crash)."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-use_value_profile=1', '-dict=' + dict_path,
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
# The incremental stats are not zero as the two step merge was used.
self.assertNotEqual(0, results.stats['new_edges'])
self.assertNotEqual(0, results.stats['new_features'])
@test_utils.slow
def test_fuzz_no_crash_with_old_libfuzzer(self):
"""Tests fuzzing (no crash) with an old version of libFuzzer."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer_old')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer_old'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-use_value_profile=1', '-dict=' + dict_path,
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
# The incremental stats are zero as the single step merge was used.
self.assertEqual(0, results.stats['new_edges'])
self.assertEqual(0, results.stats['new_features'])
def test_fuzz_crash(self):
"""Tests fuzzing (crash)."""
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'always_crash_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assert_has_stats(results.stats)
self.compare_arguments(
os.path.join(DATA_DIR, 'always_crash_fuzzer'), [
'-max_len=100', '-timeout=25', '-rss_limit_mb=2560',
'-artifact_prefix=' + TEMP_DIR + '/', '-max_total_time=5',
'-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'corpus')
], results.command)
self.assertEqual(1, len(results.crashes))
self.assertTrue(os.path.exists(results.crashes[0].input_path))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(results.logs, results.crashes[0].stacktrace)
self.assertListEqual([
'-rss_limit_mb=2560',
'-timeout=60',
], results.crashes[0].reproduce_args)
self.assertIn('Test unit written to {0}/crash-'.format(self.crash_dir),
results.logs)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address '
'0x000000000000', results.logs)
def test_fuzz_from_subset(self):
"""Tests fuzzing from corpus subset."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.CORPUS_SUBSET_STRATEGY])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.compare_arguments(
os.path.join(DATA_DIR, 'test_fuzzer'), [
'-max_len=256', '-timeout=25', '-rss_limit_mb=2560',
'-dict=' + dict_path, '-artifact_prefix=' + TEMP_DIR + '/',
'-max_total_time=5', '-print_final_stats=1'
], [
os.path.join(TEMP_DIR, 'temp-1337/new'),
os.path.join(TEMP_DIR, 'temp-1337/subset')
], results.command)
self.assert_has_stats(results.stats)
def test_minimize(self):
"""Tests minimize."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'minimized_testcase')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.minimize_testcase(target_path, [], testcase_path,
minimize_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(minimize_output_path))
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('A', result)
def test_cleanse(self):
"""Tests cleanse."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
cleanse_output_path = os.path.join(TEMP_DIR, 'cleansed_testcase')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.cleanse(target_path, [], testcase_path,
cleanse_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(cleanse_output_path))
with open(cleanse_output_path) as f:
result = f.read()
self.assertFalse(all(c == 'A' for c in result))
def test_analyze_dict(self):
"""Tests recommended dictionary analysis."""
test_helpers.patch(self, [
'bot.fuzzers.dictionary_manager.DictionaryManager.'
'parse_recommended_dictionary_from_log_lines',
'bot.fuzzers.dictionary_manager.DictionaryManager.'
'update_recommended_dictionary',
])
self.mock.parse_recommended_dictionary_from_log_lines.return_value = set([
'"USELESS_0"',
'"APPLE"',
'"USELESS_1"',
'"GINGER"',
'"USELESS_2"',
'"BEET"',
'"USELESS_3"',
])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR,
'analyze_dict_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
expected_recommended_dictionary = set([
'"APPLE"',
'"GINGER"',
'"BEET"',
])
self.assertIn(expected_recommended_dictionary,
self.mock.update_recommended_dictionary.call_args[0])
def test_fuzz_with_mutator_plugin(self):
"""Tests fuzzing with a mutator plugin."""
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(TEMP_DIR,
'mutator-plugins')
# TODO(metzman): Remove the old binary and switch the test to the new one.
fuzz_target_name = 'test_fuzzer_old'
plugin_archive_name = (
'custom_mutator_plugin-libfuzzer_asan-test_fuzzer_old.zip')
# Call before setting up the plugin since this call will erase the directory
# the plugin is written to.
_, corpus_path = setup_testcase_and_corpus('empty', 'empty_corpus')
plugin_archive_path = os.path.join(DATA_DIR, plugin_archive_name)
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.MUTATOR_PLUGIN_STRATEGY])
self.mock._get_mutator_plugins_from_bucket.return_value = [ # pylint: disable=protected-access
plugin_archive_name
]
self.mock._download_mutator_plugin_archive.return_value = ( # pylint: disable=protected-access
plugin_archive_path)
custom_mutator_print_string = 'CUSTOM MUTATOR\n'
try:
target_path = engine_common.find_fuzzer_path(DATA_DIR, fuzz_target_name)
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
finally:
shutil.rmtree(os.environ['MUTATOR_PLUGINS_DIR'])
# custom_mutator_print_string gets printed before the custom mutator mutates
# a test case. Assert that the count is greater than 1 to ensure that the
# function didn't crash on its first execution (after printing).
self.assertGreater(results.logs.count(custom_mutator_print_string), 1)
def test_merge_reductions(self):
"""Tests that reduced testcases are merged back into the original corpus
without deleting the larger version."""
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(1.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'empty_corpus')
fuzz_target_name = 'analyze_dict_fuzzer'
test_helpers.patch(self, [
'bot.fuzzers.libFuzzer.engine.LibFuzzerEngine._create_merge_corpus_dir',
'system.shell.get_directory_file_count',
])
self.mock.get_directory_file_count.side_effect = (
mock_get_directory_file_count)
minimal_unit_contents = 'APPLE'
minimal_unit_hash = '569bea285d70dda2218f89ef5454ea69fb5111ef'
nonminimal_unit_contents = 'APPLEO'
nonminimal_unit_hash = '07aef0e305db0779f3b52ab4dad975a1b737c461'
def mocked_create_merge_directory(_):
"""A mocked version of create_merge_directory that adds some interesting
files to the merge corpus and initial corpus."""
merge_directory_path = libfuzzer.create_corpus_directory('merge-corpus')
# Write the minimal unit to the new corpus directory.
new_corpus_directory_path = libfuzzer.create_corpus_directory('new')
minimal_unit_path = os.path.join(new_corpus_directory_path,
minimal_unit_hash)
with open(minimal_unit_path, 'w+') as file_handle:
file_handle.write(minimal_unit_contents)
# Write the nonminimal unit to the corpus directory.
nonminimal_unit_path = os.path.join(corpus_path, nonminimal_unit_hash)
with open(nonminimal_unit_path, 'w+') as file_handle:
file_handle.write(nonminimal_unit_contents)
return merge_directory_path
# pylint: disable=protected-access
self.mock._create_merge_corpus_dir.side_effect = (
mocked_create_merge_directory)
target_path = engine_common.find_fuzzer_path(DATA_DIR, fuzz_target_name)
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.arguments.append('-runs=10')
engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
# Verify that both the newly found minimal testcase and the nonminimal
# testcase are in the corpus.
self.assertIn(minimal_unit_hash, os.listdir(corpus_path))
self.assertIn(nonminimal_unit_hash, os.listdir(corpus_path))
def test_exit_failure_logged(self):
"""Test that we log when libFuzzer's exit code indicates it ran into an
error."""
test_helpers.patch(self, [
'metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = '1'
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assertEqual(1, self.mock.log_error.call_count)
self.assertEqual(1, len(results.crashes))
self.assertEqual(fuzzer_utils.get_temp_dir(),
os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
@parameterized.parameterized.expand(['77', '27'])
def test_exit_target_bug_not_logged(self, exit_code):
"""Test that we don't log when exit code indicates bug found in target."""
test_helpers.patch(self, [
'metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertNotIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = exit_code
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assertEqual(1, len(results.crashes))
self.assertEqual(fuzzer_utils.get_temp_dir(),
os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
def test_fuzz_invalid_dict(self):
"""Tests fuzzing with an invalid dictionary (ParseDictionaryFile crash)."""
test_helpers.patch(self, [
'metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertIn('Dictionary parsing failed (target=test_fuzzer, line=2).',
args[0])
self.mock.log_error.side_effect = mocked_log_error
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'test_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
invalid_dict_path = os.path.join(DATA_DIR, 'invalid.dict')
options.arguments.append('-dict=' + invalid_dict_path)
engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
@test_utils.integration
class MinijailIntegrationTests(IntegrationTests):
"""Minijail integration tests."""
def setUp(self):
IntegrationTests.setUp(self)
os.environ['USE_MINIJAIL'] = 'True'
self.crash_dir = '/temp'
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Overridden compare_arguments."""
def _to_chroot_path(path):
"""Convert to chroot path."""
return '/' + os.path.basename(path.rstrip('/'))
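    # Editor's note: _to_chroot_path keeps only the basename, e.g.
    # '/tmp/fuzz-corpus/' becomes '/fuzz-corpus' inside the minijail chroot.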
for i, argument in enumerate(arguments):
if not argument.startswith(constants.ARTIFACT_PREFIX_FLAG):
continue
arguments[i] = constants.ARTIFACT_PREFIX_FLAG + _to_chroot_path(
argument[len(constants.ARTIFACT_PREFIX_FLAG):]) + '/'
expected_arguments = [target_path] + arguments + [
_to_chroot_path(item) for item in corpora_or_testcase
]
# Ignore minijail arguments
self.assertListEqual(expected_arguments, actual[-len(expected_arguments):])
def test_exit_failure_logged(self):
"""Exit failure is not logged in minijail."""
pass
@parameterized.parameterized.expand(['1', '77', '27'])
def test_exit_target_bug_not_logged(self, exit_code):
"""Test that we don't log when exit code indicates bug found in target."""
test_helpers.patch(self, [
'metrics.logs.log_error',
])
def mocked_log_error(*args, **kwargs): # pylint: disable=unused-argument
self.assertNotIn(engine.ENGINE_ERROR_MESSAGE, args[0])
self.mock.log_error.side_effect = mocked_log_error
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
target_path = engine_common.find_fuzzer_path(DATA_DIR, 'exit_fuzzer')
engine_impl = engine.LibFuzzerEngine()
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
options.extra_env['EXIT_FUZZER_CODE'] = exit_code
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assertEqual(1, len(results.crashes))
self.assertEqual(fuzzer_utils.get_temp_dir(),
os.path.dirname(results.crashes[0].input_path))
self.assertEqual(0, os.path.getsize(results.crashes[0].input_path))
@test_utils.integration
@test_utils.with_cloud_emulators('datastore')
class IntegrationTestsFuchsia(BaseIntegrationTest):
"""libFuzzer tests (Fuchsia)."""
def setUp(self):
BaseIntegrationTest.setUp(self)
self.temp_dir = tempfile.mkdtemp()
builds_dir = os.path.join(self.temp_dir, 'builds')
os.mkdir(builds_dir)
urls_dir = os.path.join(self.temp_dir, 'urls')
os.mkdir(urls_dir)
environment.set_value('BUILDS_DIR', builds_dir)
environment.set_value('BUILD_URLS_DIR', urls_dir)
environment.set_value('QUEUE_OVERRIDE', 'FUCHSIA')
environment.set_value('OS_OVERRIDE', 'FUCHSIA')
environment.set_value(
'RELEASE_BUILD_BUCKET_PATH',
'gs://clusterfuchsia-builds-test/libfuzzer/'
'fuchsia-([0-9]+).zip')
environment.set_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES', True)
test_helpers.patch(self, [
'system.shell.clear_temp_directory',
])
def tearDown(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia test until build size reduced.')
def test_fuzzer_can_boot_and_run_with_corpus(self):
"""Tests running a single round of fuzzing on a Fuchsia target, using
a toy fuzzer that should crash very quickly.
Additionally, tests that pushing a corpus to the target works & produces
an expanded corpus."""
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
environment.set_value('FUZZ_TARGET', 'example_fuzzers/trap_fuzzer')
build_manager.setup_build()
_, corpus_path = setup_testcase_and_corpus('aaaa', 'fuchsia_corpus')
    num_files_original = len(os.listdir(corpus_path))
engine_impl = engine.LibFuzzerEngine()
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(20.0)
options = engine_impl.prepare(corpus_path, 'example_fuzzers/trap_fuzzer',
DATA_DIR)
results = engine_impl.fuzz('example_fuzzers/trap_fuzzer', options, TEMP_DIR,
20)
# If we don't get a crash, something went wrong.
self.assertIn('Test unit written to', results.logs)
# Check that the command was invoked with a corpus argument.
self.assertIn('data/corpus/new', results.command)
# Check that new units were added to the corpus.
num_files_new = len([
corpfile
for corpfile in os.listdir(os.path.join(TEMP_DIR, 'temp-1337/new'))
])
self.assertGreater(num_files_new, num_files_original)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_fuzzer_can_boot_and_run_reproducer(self):
"""Tests running a testcase that should cause a fast, predictable crash."""
environment.set_value('FUZZ_TARGET', 'example_fuzzers/overflow_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_crash',
'empty_corpus')
engine_impl = engine.LibFuzzerEngine()
result = engine_impl.reproduce('example_fuzzers/overflow_fuzzer',
testcase_path,
['-timeout=25', '-rss_limit_mb=2560'], 30)
self.assertIn('ERROR: AddressSanitizer: heap-buffer-overflow on address',
result.output)
self.assertIn('Running: data/fuchsia_crash', result.output)
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_qemu_logs_returned_on_error(self):
"""Test running against a qemu that has died"""
test_helpers.patch(self, ['metrics.logs.log_warn'])
# Pass-through logs just so we can see what's going on (but moving from
# log_warn to plain log to avoid creating a loop)
self.mock.log_warn.side_effect = logs.log
environment.set_value('FUZZ_TARGET', 'example_fuzzers/overflow_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_crash',
'empty_corpus')
runner = libfuzzer.FuchsiaQemuLibFuzzerRunner('fake/fuzzer')
# Check that it's up properly
self.assertEqual(runner.device.ssh(['echo', 'hello']), 0)
# Force shutdown
runner.device.ssh(['dm', 'shutdown'])
# Try to fuzz against the dead qemu to trigger automatic recovery behavior
engine_impl = engine.LibFuzzerEngine()
engine_impl.reproduce('example_fuzzers/overflow_fuzzer', testcase_path,
['-timeout=25', '-rss_limit_mb=2560'], 30)
# Check the logs for the shutdown sequence
self.assertIn('Shutting down', self.mock.log_warn.call_args[0][0])
@unittest.skipIf(
not environment.get_value('FUCHSIA_TESTS'),
'Temporarily disabling the Fuchsia tests until build size reduced.')
def test_minimize_testcase(self):
"""Tests running a testcase that should be able to minimize."""
environment.set_value('FUZZ_TARGET', 'example_fuzzers/trap_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
build_manager.setup_build()
testcase_path, _ = setup_testcase_and_corpus('fuchsia_overlong_crash',
'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'output')
engine_impl = engine.LibFuzzerEngine()
result = engine_impl.minimize_testcase('example_fuzzers/trap_fuzzer',
['-runs=1000000'], testcase_path,
minimize_output_path, 30)
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('HI!', result)
@test_utils.integration
@test_utils.with_cloud_emulators('datastore')
class IntegrationTestsAndroid(BaseIntegrationTest, android_helpers.AndroidTest):
"""libFuzzer tests (Android)."""
def setUp(self):
android_helpers.AndroidTest.setUp(self)
BaseIntegrationTest.setUp(self)
if android.settings.get_sanitizer_tool_name() != 'hwasan':
raise Exception('Device is not set up with HWASan.')
environment.set_value('BUILD_DIR', ANDROID_DATA_DIR)
environment.set_value('JOB_NAME', 'libfuzzer_hwasan_android_device')
environment.reset_current_memory_tool_options()
self.crash_dir = TEMP_DIR
self.adb_path = android.adb.get_adb_path()
self.hwasan_options = 'HWASAN_OPTIONS="%s"' % quote(
environment.get_value('HWASAN_OPTIONS'))
def device_path(self, local_path):
"""Return device path for a local path."""
return os.path.join(
android.constants.DEVICE_FUZZING_DIR,
os.path.relpath(local_path, environment.get_root_directory()))
def assert_has_stats(self, stats):
"""Asserts that libFuzzer stats are in output."""
self.assertIn('number_of_executed_units', stats)
self.assertIn('average_exec_per_sec', stats)
self.assertIn('new_units_added', stats)
self.assertIn('slowest_unit_time_sec', stats)
self.assertIn('peak_rss_mb', stats)
def test_single_testcase_crash(self):
"""Tests libfuzzer with a crashing testcase."""
testcase_path, _ = setup_testcase_and_corpus('crash', 'empty_corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
result = engine_impl.reproduce(target_path, testcase_path,
['-timeout=60', '-rss_limit_mb=2560'], 65)
self.assertEqual([
self.adb_path, 'shell', self.hwasan_options,
self.device_path(target_path), '-timeout=60', '-rss_limit_mb=2560',
'-runs=100',
self.device_path(testcase_path)
], result.command)
self.assertIn(
'ERROR: HWAddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
@test_utils.slow
def test_fuzz_no_crash(self):
"""Tests fuzzing (no crash)."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.VALUE_PROFILE_STRATEGY])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assert_has_stats(results.stats)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=256',
'-timeout=25',
'-rss_limit_mb=2560',
'-use_value_profile=1',
'-dict=' + self.device_path(dict_path),
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'corpus')),
], results.command)
self.assertTrue(android.adb.file_exists(self.device_path(dict_path)))
self.assertEqual(0, len(results.crashes))
# New items should've been added to the corpus.
self.assertNotEqual(0, len(os.listdir(corpus_path)))
def test_fuzz_crash(self):
"""Tests fuzzing (crash)."""
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty', 'corpus')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'always_crash_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assert_has_stats(results.stats)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=100',
'-timeout=25',
'-rss_limit_mb=2560',
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'corpus')),
], results.command)
self.assertEqual(1, len(results.crashes))
self.assertTrue(os.path.exists(results.crashes[0].input_path))
self.assertEqual(TEMP_DIR, os.path.dirname(results.crashes[0].input_path))
self.assertEqual(results.logs, results.crashes[0].stacktrace)
self.assertListEqual([
'-rss_limit_mb=2560',
'-timeout=60',
], results.crashes[0].reproduce_args)
self.assertIn(
'Test unit written to {0}/crash-'.format(
self.device_path(self.crash_dir)), results.logs)
self.assertIn(
'ERROR: HWAddressSanitizer: SEGV on unknown address '
'0x000000000000', results.logs)
def test_fuzz_from_subset(self):
"""Tests fuzzing from corpus subset."""
self.mock.generate_weighted_strategy_pool.return_value = set_strategy_pool(
[strategy.CORPUS_SUBSET_STRATEGY])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'test_fuzzer')
dict_path = target_path + '.dict'
options = engine_impl.prepare(corpus_path, target_path, ANDROID_DATA_DIR)
results = engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
self.assertEqual([
self.adb_path,
'shell',
self.hwasan_options,
self.device_path(target_path),
'-max_len=256',
'-timeout=25',
'-rss_limit_mb=2560',
'-dict=' + self.device_path(dict_path),
'-artifact_prefix=' + self.device_path(TEMP_DIR) + '/',
'-max_total_time=5',
'-print_final_stats=1',
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/new')),
self.device_path(os.path.join(TEMP_DIR, 'temp-1337/subset')),
], results.command)
self.assertTrue(android.adb.file_exists(self.device_path(dict_path)))
self.assert_has_stats(results.stats)
def test_minimize(self):
"""Tests minimize."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
minimize_output_path = os.path.join(TEMP_DIR, 'minimized_testcase')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.minimize_testcase(target_path, [], testcase_path,
minimize_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(minimize_output_path))
with open(minimize_output_path) as f:
result = f.read()
self.assertEqual('A', result)
def test_cleanse(self):
"""Tests cleanse."""
testcase_path, _ = setup_testcase_and_corpus('aaaa', 'empty_corpus')
cleanse_output_path = os.path.join(TEMP_DIR, 'cleansed_testcase')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'crash_with_A_fuzzer')
result = engine_impl.cleanse(target_path, [], testcase_path,
cleanse_output_path, 120)
self.assertTrue(result)
self.assertTrue(os.path.exists(cleanse_output_path))
with open(cleanse_output_path) as f:
result = f.read()
self.assertFalse(all(c == 'A' for c in result))
def test_analyze_dict(self):
"""Tests recommended dictionary analysis."""
test_helpers.patch(self, [
'bot.fuzzers.dictionary_manager.DictionaryManager.'
'parse_recommended_dictionary_from_log_lines',
'bot.fuzzers.dictionary_manager.DictionaryManager.'
'update_recommended_dictionary',
])
self.mock.parse_recommended_dictionary_from_log_lines.return_value = set([
'"USELESS_0"',
'"APPLE"',
'"USELESS_1"',
'"GINGER"',
'"USELESS_2"',
'"BEET"',
'"USELESS_3"',
])
self.mock.get_fuzz_timeout.return_value = get_fuzz_timeout(5.0)
_, corpus_path = setup_testcase_and_corpus('empty',
'corpus_with_some_files')
engine_impl = engine.LibFuzzerEngine()
target_path = engine_common.find_fuzzer_path(ANDROID_DATA_DIR,
'analyze_dict_fuzzer')
options = engine_impl.prepare(corpus_path, target_path, DATA_DIR)
engine_impl.fuzz(target_path, options, TEMP_DIR, 10)
expected_recommended_dictionary = set([
'"APPLE"',
'"GINGER"',
'"BEET"',
])
self.assertIn(expected_recommended_dictionary,
self.mock.update_recommended_dictionary.call_args[0])
| 38.923611
| 100
| 0.678501
|
98496c291367b0c394559725a8e8a5ccdf3cc8aa
| 3,652
|
py
|
Python
|
src/transactors/file_transactor.py
|
alliance-genome/agr_loader
|
79f87f16ccfc1260f57ec651dce6e32b4b926866
|
[
"MIT"
] | 1
|
2018-10-05T15:35:54.000Z
|
2018-10-05T15:35:54.000Z
|
src/transactors/file_transactor.py
|
alliance-genome/agr_loader
|
79f87f16ccfc1260f57ec651dce6e32b4b926866
|
[
"MIT"
] | 176
|
2017-09-05T23:01:59.000Z
|
2021-11-16T13:14:45.000Z
|
src/transactors/file_transactor.py
|
alliance-genome/agr_loader
|
79f87f16ccfc1260f57ec651dce6e32b4b926866
|
[
"MIT"
] | 5
|
2020-08-19T11:50:59.000Z
|
2020-12-19T08:54:01.000Z
|
"""File Trnasactor"""
import logging
import multiprocessing
from time import sleep
from etl import ETL
class FileTransactor():
"""File Transactor"""
logger = logging.getLogger(__name__)
count = 0
queue = None
def __init__(self):
manager = multiprocessing.Manager()
FileTransactor.queue = manager.Queue()
self.filetracking_queue = manager.list()
@staticmethod
def _get_name():
return "FileTransactor %s" % multiprocessing.current_process().name
def start_threads(self, thread_count):
"""Start Threads"""
self.thread_pool = []
for i in range(0, thread_count):
process = multiprocessing.Process(target=self.run, name=str(i), args=(self.filetracking_queue,))
process.start()
self.thread_pool.append(process)
@staticmethod
def execute_transaction(sub_type):
"""Execture Transaction"""
FileTransactor.count = FileTransactor.count + 1
FileTransactor.queue.put((sub_type, FileTransactor.count))
FileTransactor.logger.debug("Execute Transaction Batch: %s QueueSize: %s ", FileTransactor.count, FileTransactor.queue.qsize())
def check_for_thread_errors(self):
"""Check for Thread Errors"""
ETL.wait_for_threads(self.thread_pool, FileTransactor.queue)
def wait_for_queues(self):
"""Wait for Queues"""
FileTransactor.queue.join()
def shutdown(self):
"""Shutdown"""
self.logger.debug("Shutting down FileTransactor threads: %s", len(self.thread_pool))
for thread in self.thread_pool:
thread.terminate()
self.logger.debug("Finished Shutting down FileTransactor threads")
def run(self, filetracking_queue):
"""Run"""
self.logger.debug("%s: Starting FileTransactor Thread Runner.", self._get_name())
while True:
try:
(sub_type, FileTransactor.count) = FileTransactor.queue.get()
except EOFError as error:
self.logger.debug("Queue Closed exiting: %s", error)
return
self.logger.debug("%s: Pulled File Transaction Batch: %s QueueSize: %s ", self._get_name(), FileTransactor.count, FileTransactor.queue.qsize())
self.download_file(sub_type, filetracking_queue)
FileTransactor.queue.task_done()
#EOFError
def download_file(self, sub_type, filetracking_queue):
"""Download File"""
filepath = sub_type.get_filepath()
url_to_download = sub_type.get_file_url()
self.logger.debug("%s: Acquiring file: %s from filepath: %s", self._get_name(), filepath, url_to_download)
self.logger.debug("%s: Checking whether the file is currently downloading: %s", self._get_name(), url_to_download)
if url_to_download in filetracking_queue:
self.logger.debug("%s: The file is already downloading, waiting for it to finish: %s", self._get_name(), url_to_download)
while url_to_download in filetracking_queue:
sleep(1)
self.logger.debug("%s: File no longer downloading, proceeding: %s", self._get_name(), url_to_download)
sub_type.get_data()
else:
self.logger.debug("%s: File not currently downloading, initiating download: %s", self._get_name(), url_to_download)
filetracking_queue.append(url_to_download)
sub_type.get_data()
self.logger.debug("%s: Download complete. Removing item from download queue: %s", self._get_name(), url_to_download)
filetracking_queue.remove(url_to_download)
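# Editor's usage sketch (hypothetical job object, not part of this module): any object
# exposing get_filepath(), get_file_url() and get_data() can be queued, and duplicate
# URLs are only downloaded once thanks to the shared filetracking_queue used above.
#
#   transactor = FileTransactor()
#   transactor.start_threads(4)
#   for job in jobs:  # each job describes one file to fetch
#       FileTransactor.execute_transaction(job)
#   transactor.wait_for_queues()
#   transactor.check_for_thread_errors()
#   transactor.shutdown()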
| 35.803922
| 155
| 0.65471
|
d526243e9b58d2e6e06828534465d66de0db33fb
| 1,730
|
py
|
Python
|
telemaster/mobile_phone_mast_repository.py
|
seanjohnno/TeleMaster
|
40f2d1c37dab4c26503ae3813fcf8f6f9d52dada
|
[
"MIT"
] | null | null | null |
telemaster/mobile_phone_mast_repository.py
|
seanjohnno/TeleMaster
|
40f2d1c37dab4c26503ae3813fcf8f6f9d52dada
|
[
"MIT"
] | null | null | null |
telemaster/mobile_phone_mast_repository.py
|
seanjohnno/TeleMaster
|
40f2d1c37dab4c26503ae3813fcf8f6f9d52dada
|
[
"MIT"
] | null | null | null |
import csv
import datetime
import time
from typing import List
class MobilePhoneMastInfo:
def __init__(self, csv_row):
self.__csv_row = csv_row
def property_name(self):
return self.__csv_row['Property Name']
def rent(self):
return float(self.__csv_row['Current Rent'])
def lease_years(self):
return int(self.__csv_row['Lease Years'])
def tenant_name(self):
return self.__csv_row['Tenant Name']
def lease_start_date(self):
date = self.__csv_row['Lease Start Date']
return self.__parse_date(date)
def property_1st_line_address(self):
return self.__csv_row['Property Address [1]']
def property_2nd_line_address(self):
return self.__csv_row['Property Address [2]']
def property_3rd_line_address(self):
return self.__csv_row['Property Address [3]']
def property_4th_line_address(self):
return self.__csv_row['Property Address [4]']
def property_unit_name(self):
return self.__csv_row['Unit Name']
def lease_end_date(self):
date = self.__csv_row['Lease End Date']
return self.__parse_date(date)
def __parse_date(self, date):
parsed_date_struct = time.strptime(date, '%d %b %Y')
return datetime.date(parsed_date_struct[0], parsed_date_struct[1], parsed_date_struct[2])
class MobilePhoneMastRepository:
def __init__(self, csv_file_location: str):
self.__csv_file_location = csv_file_location
def list_all_masts(self) -> List[MobilePhoneMastInfo]:
with open(self.__csv_file_location) as csvfile:
reader = csv.DictReader(csvfile)
return [MobilePhoneMastInfo(row) for row in reader]
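# Editor's usage sketch (hypothetical CSV path; the column names match those accessed above):
#   repository = MobilePhoneMastRepository('mobile_phone_masts.csv')
#   masts = repository.list_all_masts()
#   cheapest_five = sorted(masts, key=lambda mast: mast.rent())[:5]
#   print(cheapest_five[0].property_name(), cheapest_five[0].lease_start_date())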
| 30.350877
| 97
| 0.682659
|
060c4dc93b4b130e7efb9b4ece567e2b39065ecf
| 1,056
|
py
|
Python
|
hw_14/diacritization_eval.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
hw_14/diacritization_eval.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
hw_14/diacritization_eval.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("gold", type=str, help="Gold data")
parser.add_argument("system", type=str, help="System data")
args = parser.parse_args()
with open(args.gold, "r", encoding="utf-8") as gold_file:
gold = [line.rstrip("\n").split() for line in gold_file.readlines()]
with open(args.system, "r", encoding="utf-8") as system_file:
system = [line.rstrip("\n").split() for line in system_file.readlines()]
tokens, correct = 0, 0
assert len(gold) == len(
system
), "The gold and system files need to have the same number of lines"
for i in range(len(gold)):
assert len(gold[i]) == len(
system[i]
), "Each line in gold and system files must have the same number of tokens line " + str(i)
for j in range(len(gold[i])):
tokens += 1
correct += gold[i][j] == system[i][j]
print("Accuracy: {:.2f}".format(100 * correct / tokens))
| 36.413793
| 98
| 0.608902
|
144f2d32fe9c2f4b96613e64bcaef83aee6592c3
| 1,896
|
py
|
Python
|
testing/test_notebooks.py
|
N1m6/alibi-detect
|
46576a3c85b240a576ebd8d6d8203a74efce45eb
|
[
"Apache-2.0"
] | 1,227
|
2019-11-19T15:38:40.000Z
|
2022-03-31T11:18:32.000Z
|
testing/test_notebooks.py
|
N1m6/alibi-detect
|
46576a3c85b240a576ebd8d6d8203a74efce45eb
|
[
"Apache-2.0"
] | 323
|
2019-11-21T18:41:00.000Z
|
2022-03-31T21:08:56.000Z
|
testing/test_notebooks.py
|
N1m6/alibi-detect
|
46576a3c85b240a576ebd8d6d8203a74efce45eb
|
[
"Apache-2.0"
] | 133
|
2019-11-19T14:23:23.000Z
|
2022-03-31T07:55:43.000Z
|
"""
This script is an example of using `jupytext` to execute notebooks for testing instead of relying on
the `nbmake` plugin. This approach may be more flexible if our requirements change in the future.
"""
import glob
from pathlib import Path
import pytest
from jupytext.cli import jupytext
# Set of all example notebooks
# NOTE: we specifically get only the name of the notebook not the full path as we want to
# use these as variables on the command line for `pytest` for the workflow executing only
# changed notebooks. `pytest` does not allow `/` as part of the test name for the -k argument.
# This also means that the approach is limited to all notebooks being in the `NOTEBOOK_DIR`
# top-level path.
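# For example, a workflow could then run a subset of (hypothetical) notebooks with:
#   pytest testing/test_notebooks.py -k "some_notebook.ipynb or another_notebook.ipynb"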
NOTEBOOK_DIR = 'doc/source/examples'
ALL_NOTEBOOKS = {Path(x).name for x in glob.glob(str(Path(NOTEBOOK_DIR).joinpath('*.ipynb')))}
# The following set includes notebooks which are not to be executed during notebook tests.
# These are typically those that would take too long to run in a CI environment or are
# impractical due to other dependencies (e.g. downloading large datasets).
EXCLUDE_NOTEBOOKS = {
# the following are all long-running
'cd_distillation_cifar10.ipynb',
'cd_ks_cifar10.ipynb',
'cd_mmd_cifar10.ipynb',
'od_llr_genome.ipynb',
'od_llr_mnist.ipynb',
'od_seq2seq_synth.ipynb',
# the following requires a k8s cluster
'alibi_detect_deploy.ipynb',
# the following require downloading large datasets
'cd_online_camelyon.ipynb',
'cd_text_amazon.ipynb',
# the following require complex dependencies
'cd_mol.ipynb', # complex to install pytorch-geometric
}
EXECUTE_NOTEBOOKS = ALL_NOTEBOOKS - EXCLUDE_NOTEBOOKS
@pytest.mark.timeout(600)
@pytest.mark.parametrize("notebook", EXECUTE_NOTEBOOKS)
def test_notebook_execution(notebook):
notebook = Path(NOTEBOOK_DIR, notebook)
jupytext(args=[str(notebook), "--execute"])
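    # Equivalent to invoking the CLI directly: `jupytext --execute <notebook>.ipynb`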
| 39.5
| 109
| 0.758439
|
1a5cc4d46774bebec3d082dec8d26a261b7d91a4
| 1,292
|
py
|
Python
|
shrinky/glsl_terminator.py
|
xyproto/shrinky
|
8f318d2f62f8ef3cffae6bd5db1b36c95067aac6
|
[
"BSD-3-Clause"
] | 11
|
2019-03-16T11:03:50.000Z
|
2021-12-26T12:41:57.000Z
|
shrinky/glsl_terminator.py
|
xyproto/shrinky
|
8f318d2f62f8ef3cffae6bd5db1b36c95067aac6
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T16:22:37.000Z
|
2022-02-12T16:22:37.000Z
|
shrinky/glsl_terminator.py
|
xyproto/shrinky
|
8f318d2f62f8ef3cffae6bd5db1b36c95067aac6
|
[
"BSD-3-Clause"
] | null | null | null |
########################################
# GlslTerminator #######################
########################################
class GlslTerminator:
"""Terminator class."""
def __init__(self, source):
"""Constructor."""
self.__terminator = source
def format(self, force):
"""Return formatted output."""
return self.__terminator
def getTerminator(self):
"""Access terminating character."""
return self.__terminator
def __eq__(self, other):
"""Equals operator."""
if is_glsl_terminator(other):
return self.__terminator == other.getTerminator()
return self.getTerminator() == other
def __ne__(self, other):
"""Not equals operator."""
return not (self == other)
def __str__(self):
"""String representation."""
return "GlslTerminator('%s')" % (self.__terminator)
########################################
# Functions ############################
########################################
def interpret_terminator(source):
"""Try to interpret a terminator."""
if source == ";":
return GlslTerminator(source)
return None
def is_glsl_terminator(op):
"""Tell if token is operator."""
return isinstance(op, GlslTerminator)
| 25.84
| 61
| 0.513158
|
82271689e64fe42f9973a693e6f86e25319fa0f4
| 432
|
py
|
Python
|
models/neck/__init__.py
|
law930001/panpp
|
b1da417cb47df94e3aa9fd039bd94b06ab8df9bd
|
[
"Apache-2.0"
] | 1
|
2021-12-28T06:56:10.000Z
|
2021-12-28T06:56:10.000Z
|
models/neck/__init__.py
|
law930001/panpp
|
b1da417cb47df94e3aa9fd039bd94b06ab8df9bd
|
[
"Apache-2.0"
] | null | null | null |
models/neck/__init__.py
|
law930001/panpp
|
b1da417cb47df94e3aa9fd039bd94b06ab8df9bd
|
[
"Apache-2.0"
] | null | null | null |
from .builder import build_neck
from .fpem_v1 import FPEM_v1
from .fpem_v2 import FPEM_v2 # for PAN++
from .fpn import FPN
from .fpnv2_1 import FPN_v2_1
from .fpnv2_2 import FPN_v2_2
from .fpnv2_3 import FPN_v2_3
from .fpnv3_1 import FPN_v3_1
from .fpnv3_2 import FPN_v3_2
from .fpnv3_3 import FPN_v3_3
__all__ = ['FPN', 'FPEM_v1', 'FPEM_v2', 'build_neck', 'FPN_v2_1', 'FPN_v2_2', 'FPN_v2_3', 'FPN_v3_1', 'FPN_v3_2', 'FPN_v3_3']
| 30.857143
| 125
| 0.761574
|
9faafa9140c20391dea209fafeaf7c9024a7924d
| 2,402
|
py
|
Python
|
src.py
|
SiddharthanSingaravel/Identifying-Sentiments-in-HealthTwitter
|
a743fa1c7bf113b0e69a31ebc234a2a3690f6c70
|
[
"MIT"
] | null | null | null |
src.py
|
SiddharthanSingaravel/Identifying-Sentiments-in-HealthTwitter
|
a743fa1c7bf113b0e69a31ebc234a2a3690f6c70
|
[
"MIT"
] | null | null | null |
src.py
|
SiddharthanSingaravel/Identifying-Sentiments-in-HealthTwitter
|
a743fa1c7bf113b0e69a31ebc234a2a3690f6c70
|
[
"MIT"
] | null | null | null |
import keras
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Embedding, Flatten, Conv1D, MaxPooling1D, LSTM
from keras.preprocessing.sequence import pad_sequences
import re
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
def process_text(sen):
# Removing html tags
sentence = remove_tags(sen)
# Remove punctuations and numbers
sentence = re.sub('[^a-zA-Z]', ' ', sentence)
# Single character removal
sentence = re.sub(r"\s+[a-zA-Z]\s+", ' ', sentence)
# Removing multiple spaces
sentence = re.sub(r'\s+', ' ', sentence)
return sentence
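# Editor's note: quick illustrative checks (made-up strings, not from the dataset) of what
# process_text strips -- HTML tags, non-letters, stray single characters and extra whitespace.
assert process_text("<p>Hello,   world!</p>").strip() == "Hello world"
assert process_text("It was a 10/10 trip").strip() == "It was trip"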
X = []
movie_reviews = pd.read_csv("test_data.csv")
movie_reviews.isnull().values.any()
sentences = list(movie_reviews['review'])
for sen in sentences:
X.append(process_text(sen))
y = movie_reviews['sentiment']
y = np.array(list(map(lambda x: 1 if x == "positive" else 0, y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
vocab_size = 290419 # change this to "len(tokenizer.word_index) + 1"
max_len = 100 # optional and can be set to desired val
X_train = pad_sequences(X_train, padding='post', maxlen=max_len)
X_test = pad_sequences(X_test, padding='post', maxlen=max_len)
w2v_model = KeyedVectors.load("model.w2v", mmap='r')
W2V_SIZE = 300
embedding_matrix = np.zeros((vocab_size, W2V_SIZE))
for word, i in tokenizer.word_index.items():
if word in w2v_model.wv:
embedding_matrix[i] = w2v_model.wv[word]
embedding_layer = Embedding(vocab_size, W2V_SIZE, weights=[embedding_matrix], trainable=False)
model = Sequential()
model.add(embedding_layer)
model.add(Dropout(0.5))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
model.load_weights('model.h5')
model.compile(loss='binary_crossentropy', optimizer="adam", metrics=['accuracy'])
score = model.evaluate(X_test, y_test, batch_size=1)
print("ACCURACY:", score[1])
print("LOSS:", score[0])
| 28.595238
| 99
| 0.735637
|
891b6bccb1ce126140a5452120c7d8b97d680ce6
| 1,274
|
py
|
Python
|
beaconator/config.py
|
mfdeux/beaconator
|
81372cd32457b2d76669354ae20968b678f45d04
|
[
"MIT"
] | null | null | null |
beaconator/config.py
|
mfdeux/beaconator
|
81372cd32457b2d76669354ae20968b678f45d04
|
[
"MIT"
] | null | null | null |
beaconator/config.py
|
mfdeux/beaconator
|
81372cd32457b2d76669354ae20968b678f45d04
|
[
"MIT"
] | null | null | null |
import os
import typing
import pydantic
import yaml
from .backend.utils.secrets import make_jwt_secret, make_temporary_password
class Config(pydantic.BaseModel):
database_uri: str = "sqlite:///data.db"
docs_url: typing.Optional[str] = None
redoc_url: typing.Optional[str] = None
serve_admin: bool = True
admin_path: str = "/admin"
password: typing.Optional[str] = "tDkZMLvsao93Cm5I9FZbwqPH"
jwt_secret: typing.Optional[
str
] = "cQ0MFXdo2CrvBtReB10cey0NMaB4oize76RFZ43FqxpKnXtMEZrY3U0qUV1X"
def load_config(path: typing.Optional[str] = None) -> Config:
if path:
with open(path) as fh:
loaded_config = yaml.safe_load(fh)
config = Config(**loaded_config)
else:
config = Config()
if not config.password:
config.password = make_temporary_password()
print(config.password)
if not config.jwt_secret:
config.jwt_secret = make_jwt_secret()
os.environ["BEACONATOR__DB_URI"] = config.database_uri
os.environ["BEACONATOR__PASSWORD"] = config.password
os.environ["BEACONATOR__JWT_SECRET"] = config.jwt_secret
return config
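# Editor's sketch (hypothetical file name): a YAML config only needs the keys it wants to
# override; anything omitted falls back to the Config defaults above, and an empty
# password or jwt_secret is replaced by a generated one at load time.
#
#   # config.yml
#   database_uri: postgresql://localhost/beaconator
#   serve_admin: false
#   password: null
#
#   config = load_config("config.yml")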
def save_config(temp_config: typing.Dict, path: str):
    with open(path, "w") as fh:
yaml.dump(temp_config, fh)
| 27.695652
| 75
| 0.696232
|
209543e6158635ab1051295945cb27956aacbbc3
| 13,589
|
py
|
Python
|
pal/parser/pal_model_parser.py
|
jasa/pal
|
2a5b0b3ed4698a077d9c707a1e6c692eebfb362a
|
[
"MIT"
] | 26
|
2020-01-06T23:53:17.000Z
|
2022-02-01T08:58:21.000Z
|
pal/parser/pal_model_parser.py
|
jasa/pal
|
2a5b0b3ed4698a077d9c707a1e6c692eebfb362a
|
[
"MIT"
] | 30
|
2019-11-13T00:55:22.000Z
|
2022-01-06T08:09:35.000Z
|
pal/parser/pal_model_parser.py
|
jasa/pal
|
2a5b0b3ed4698a077d9c707a1e6c692eebfb362a
|
[
"MIT"
] | 14
|
2019-11-15T16:56:22.000Z
|
2021-12-22T10:14:17.000Z
|
from pal.parser.abstract_parser import AbstractParser
from pal.logger import logger
from pal.exception import PalParserException
import pal.model
import pal.model.generic
import pal.model.generic.access_mechanism
import pal.model.armv8a
import pal.model.armv8a.access_mechanism
import pal.model.intel
import pal.model.intel.access_mechanism
from yaml import load, dump
from yaml import CLoader as Loader, CDumper as Dumper
import re
class PalModelParser(AbstractParser):
def parse_file(self, path):
registers = []
try:
if "__template__" in path:
return []
with open(path, "r", encoding="utf8") as infile:
data = load(infile, Loader)
for item in data:
arch = item["arch"]
if arch == "intel":
register = pal.model.intel.register.IntelRegister()
elif arch == "armv8a":
register = pal.model.armv8a.register.ARMv8ARegister()
elif arch == "generic":
register = pal.model.generic.register.GenericRegister()
else:
raise Exception("register definition: missing or unknown architecutre (arch)")
self._parse_register(register, item)
self._parse_access_mechanisms(register, item)
self._parse_fieldsets(register, item)
registers.append(register)
except Exception as e:
msg = "Failed to parse register file " + str(path)
msg += ": " + str(e)
raise PalParserException(msg)
return registers
def _parse_register(self, register, yml):
register.name = self._strip_string(yml["name"])
register.size = yml["size"]
if "long_name" in yml:
register.long_name = self._strip_string(yml["long_name"])
if "purpose" in yml:
register.purpose = self._reflow_text(yml["purpose"])
if "arch" in yml:
register.arch = self._strip_string(yml["arch"])
if "is_internal" in yml:
register.is_internal = yml["is_internal"]
if "is_optional" in yml:
register.is_optional = yml["is_optional"]
if "is_indexed" in yml:
register.is_indexed = yml["is_indexed"]
if "execution_state" in yml:
register.execution_state = yml["execution_state"]
if "is_banked" in yml:
register.is_banked = yml["is_banked"]
if "component" in yml:
register.component = self._strip_string(yml["component"])
def _parse_access_mechanisms(self, register, yml):
if not yml["access_mechanisms"]:
return
for am_yml in yml["access_mechanisms"]:
if am_yml["name"] == "read":
am = pal.model.generic.access_mechanism.Read()
am.offset = am_yml["offset"]
register.access_mechanisms["read"].append(am)
elif am_yml["name"] == "write":
am = pal.model.generic.access_mechanism.Write()
am.offset = am_yml["offset"]
register.access_mechanisms["write"].append(am)
elif am_yml["name"] == "read_pci_config":
am = pal.model.intel.access_mechanism.ReadPciConfig()
am.name = am_yml["name"]
am.offset = am_yml["offset"]
register.access_mechanisms["read_pci_config"].append(am)
elif am_yml["name"] == "write_pci_config":
am = pal.model.intel.access_mechanism.WritePciConfig()
am.name = am_yml["name"]
am.offset = am_yml["offset"]
register.access_mechanisms["write_pci_config"].append(am)
elif am_yml["name"] == "mov_read":
am = pal.model.intel.access_mechanism.MOVRead()
am.name = am_yml["name"]
am.source_mnemonic = am_yml["source_mnemonic"]
register.access_mechanisms["mov_read"].append(am)
elif am_yml["name"] == "mov_write":
am = pal.model.intel.access_mechanism.MOVWrite()
am.name = am_yml["name"]
am.destination_mnemonic = am_yml["destination_mnemonic"]
register.access_mechanisms["mov_write"].append(am)
elif am_yml["name"] == "cpuid":
am = pal.model.intel.access_mechanism.CPUID()
am.name = am_yml["name"]
am.leaf = am_yml["leaf"]
am.output = am_yml["output"]
register.access_mechanisms["cpuid"].append(am)
elif am_yml["name"] == "rdmsr":
am = pal.model.intel.access_mechanism.RDMSR()
am.name = am_yml["name"]
am.address = am_yml["address"]
register.access_mechanisms["rdmsr"].append(am)
elif am_yml["name"] == "wrmsr":
am = pal.model.intel.access_mechanism.WRMSR()
am.name = am_yml["name"]
am.address = am_yml["address"]
register.access_mechanisms["wrmsr"].append(am)
elif am_yml["name"] == "vmread":
am = pal.model.intel.access_mechanism.VMRead()
am.name = am_yml["name"]
am.encoding = am_yml["encoding"]
register.access_mechanisms["vmread"].append(am)
elif am_yml["name"] == "vmwrite":
am = pal.model.intel.access_mechanism.VMWrite()
am.name = am_yml["name"]
am.encoding = am_yml["encoding"]
register.access_mechanisms["vmwrite"].append(am)
elif am_yml["name"] == "xgetbv":
am = pal.model.intel.access_mechanism.XGETBV()
am.name = am_yml["name"]
am.register = am_yml["register"]
register.access_mechanisms["xgetbv"].append(am)
elif am_yml["name"] == "xsetbv":
am = pal.model.intel.access_mechanism.XSETBV()
am.name = am_yml["name"]
am.register = am_yml["register"]
register.access_mechanisms["xsetbv"].append(am)
elif am_yml["name"] == "mrs_register":
am = pal.model.armv8a.access_mechanism.MRSRegister()
am.name = am_yml["name"]
am.op0 = am_yml["op0"]
am.op1 = am_yml["op1"]
am.op2 = am_yml["op2"]
am.crn = am_yml["crn"]
am.crm = am_yml["crm"]
am.operand_mnemonic = am_yml["operand_mnemonic"]
register.access_mechanisms["mrs_register"].append(am)
elif am_yml["name"] == "msr_register":
am = pal.model.armv8a.access_mechanism.MSRRegister()
am.name = am_yml["name"]
am.op0 = am_yml["op0"]
am.op1 = am_yml["op1"]
am.op2 = am_yml["op2"]
am.crn = am_yml["crn"]
am.crm = am_yml["crm"]
am.operand_mnemonic = am_yml["operand_mnemonic"]
register.access_mechanisms["msr_register"].append(am)
elif am_yml["name"] == "mcr":
am = pal.model.armv8a.access_mechanism.MCR()
am.name = am_yml["name"]
am.coproc = am_yml["coproc"]
am.opc1 = am_yml["opc1"]
am.opc2 = am_yml["opc2"]
am.crn = am_yml["crn"]
am.crm = am_yml["crm"]
register.access_mechanisms["mcr"].append(am)
elif am_yml["name"] == "mcrr":
am = pal.model.armv8a.access_mechanism.MCRR()
am.name = am_yml["name"]
am.coproc = am_yml["coproc"]
am.opc1 = am_yml["opc1"]
am.crm = am_yml["crm"]
register.access_mechanisms["mcrr"].append(am)
elif am_yml["name"] == "mrc":
am = pal.model.armv8a.access_mechanism.MRC()
am.name = am_yml["name"]
am.coproc = am_yml["coproc"]
am.opc1 = am_yml["opc1"]
am.opc2 = am_yml["opc2"]
am.crn = am_yml["crn"]
am.crm = am_yml["crm"]
register.access_mechanisms["mrc"].append(am)
elif am_yml["name"] == "mrrc":
am = pal.model.armv8a.access_mechanism.MRRC()
am.name = am_yml["name"]
am.coproc = am_yml["coproc"]
am.opc1 = am_yml["opc1"]
am.crm = am_yml["crm"]
register.access_mechanisms["mrrc"].append(am)
elif am_yml["name"] == "mrs_banked":
am = pal.model.armv8a.access_mechanism.MRSBanked()
am.name = am_yml["name"]
am.m = am_yml["m"]
am.r = am_yml["r"]
am.m1 = am_yml["m1"]
register.access_mechanisms["mrs_banked"].append(am)
elif am_yml["name"] == "msr_banked":
am = pal.model.armv8a.access_mechanism.MSRBanked()
am.name = am_yml["name"]
am.m = am_yml["m"]
am.r = am_yml["r"]
am.m1 = am_yml["m1"]
register.access_mechanisms["msr_banked"].append(am)
elif am_yml["name"] == "msr_immediate":
am = pal.model.armv8a.access_mechanism.MSRImmediate()
am.name = am_yml["name"]
register.access_mechanisms["msr_immediate"].append(am)
elif am_yml["name"] == "ldr":
am = pal.model.armv8a.access_mechanism.LDR()
am.name = am_yml["name"]
am.offset = am_yml["offset"]
register.access_mechanisms["ldr"].append(am)
elif am_yml["name"] == "str":
am = pal.model.armv8a.access_mechanism.STR()
am.name = am_yml["name"]
am.offset = am_yml["offset"]
register.access_mechanisms["str"].append(am)
elif am_yml["name"] == "vmrs":
am = pal.model.armv8a.access_mechanism.VMRS()
am.name = am_yml["name"]
am.reg = am_yml["reg"]
am.operand_mnemonic = am_yml["operand_mnemonic"]
register.access_mechanisms["vmrs"].append(am)
elif am_yml["name"] == "vmsr":
am = pal.model.armv8a.access_mechanism.VMSR()
am.name = am_yml["name"]
am.reg = am_yml["reg"]
am.operand_mnemonic = am_yml["operand_mnemonic"]
register.access_mechanisms["vmsr"].append(am)
def _parse_fieldsets(self, register, yml):
if "fieldsets" not in yml:
fs = pal.model.Fieldset()
register.fieldsets.append(fs)
return
for fieldset_yml in yml["fieldsets"]:
fs = pal.model.Fieldset()
fs.size = fieldset_yml["size"]
if "name" in fieldset_yml:
fs.name = fieldset_yml["name"]
if "condition" in fieldset_yml:
fs.condition = fieldset_yml["condition"]
for field_yml in fieldset_yml["fields"]:
field = pal.model.Field()
field.name = self._strip_string(str(field_yml["name"]))
field.lsb = int(field_yml["lsb"])
field.msb = int(field_yml["msb"])
if "long_name" in field_yml:
field.long_name = self._strip_string(field_yml["long_name"])
if "description" in field_yml:
field.description = self._reflow_text(field_yml["description"])
if "readable" in field_yml:
field.readable = field_yml["readable"]
if "writable" in field_yml:
field.writable = field_yml["writable"]
if "lockable" in field_yml:
field.lockable = field_yml["lockable"]
if "write_once" in field_yml:
field.write_once = field_yml["write_once"]
if "write_1_clear" in field_yml:
field.write_1_clear = field_yml["write_1_clear"]
if "reserved0" in field_yml:
field.reserved0 = field_yml["reserved0"]
if "reserved1" in field_yml:
field.reserved1 = field_yml["reserved1"]
if "preserved" in field_yml:
field.preserved = field_yml["preserved"]
fs.fields.append(field)
register.fieldsets.append(fs)
def _reflow_text(self, text):
text = re.sub(r"\r?\n\s*", " ", text.strip())
return re.sub(r"^\"\s*|\s*\"$", "", text)
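    # Editor's note: _reflow_text joins wrapped YAML text and drops surrounding quotes,
    # e.g. '"Controls the\n    trap behaviour."' -> 'Controls the trap behaviour.'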
def _strip_string(self, string):
if string.startswith("\""):
return self._strip_string(string[1:])
elif string.startswith("\ "):
return self._strip_string(string[1:])
elif string.startswith("\n"):
return self._strip_string(string[1:])
elif string.endswith("\n"):
return self._strip_string(string[:-1])
elif string.endswith("\""):
return self._strip_string(string[:-1])
elif string.endswith(";"):
return self._strip_string(string[:-1])
elif string.endswith("\ "):
return self._strip_string(string[:-1])
string = string.replace("\n", "")
return string
| 41.303951
| 102
| 0.528221
|
5ea5d3e51cad7032b5d5bd793d2341e55a274cd8
| 2,169
|
py
|
Python
|
cs/compress.py
|
satabios/ressurect
|
4da1f0d59af69fce6c9f70a0ac8e989ae2f75cdb
|
[
"MIT"
] | null | null | null |
cs/compress.py
|
satabios/ressurect
|
4da1f0d59af69fce6c9f70a0ac8e989ae2f75cdb
|
[
"MIT"
] | null | null | null |
cs/compress.py
|
satabios/ressurect
|
4da1f0d59af69fce6c9f70a0ac8e989ae2f75cdb
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import glob
w, h = 28, 28
cmp_rate = 6
def genBump(bumpSize, RowSize, ColSize, FrameNum):
    BumpTime = np.zeros((RowSize, ColSize), dtype=int)
Mask = np.random.rand(RowSize, ColSize)
for i in range(FrameNum-bumpSize+1):
BumpTime[np.logical_and((i / (FrameNum-bumpSize+1)) < Mask, Mask <= ((i+1) / (FrameNum-bumpSize+1)))] = i
sens_cube = np.zeros((FrameNum, RowSize, ColSize))
for row in range(RowSize):
for col in range(ColSize):
start = BumpTime[row, col]
sens_cube[start:start + bumpSize, row, col] = 1
return sens_cube, BumpTime
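# Editor's note: a small illustrative check (toy sizes, not the values used below) -- every
# pixel is active for exactly `bumpSize` consecutive frames, so the temporal sum of the
# sensing cube equals bumpSize everywhere.
_demo_cube, _ = genBump(3, 4, 4, 6)
assert _demo_cube.shape == (6, 4, 4)
assert np.all(_demo_cube.sum(axis=0) == 3)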
sens, BumpTime = genBump(3, w,h,cmp_rate)
BumpTime=np.expand_dims(BumpTime,axis=0)
mainf = './OneDrive/Desktop/cs dataset/'
f = '/home/sathya/Desktop/cs_dataset/49503078599@N01_3238848486_5fa56606b7.avi'
cap = cv2.VideoCapture(f)
vid = []
compressed= []
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
for i in range(1,1+length):
# Capture frame-by-frame
# print((length%cmp_rate==0))
if(i%cmp_rate==0):
# print(i)
# ret, frame = cap.read()
# vid.append(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (240, 240)))
# image_list = np.asarray(image_list)
ret, frame = cap.read()
vid.append(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (w, h)))
image_list = np.asarray(vid)
image_list = np.reshape(image_list,(cmp_rate,w,h))
# image_list.shape
compressed_image = np.multiply(sens,image_list )
compressed_image = np.sum(compressed_image, 0)/3.
compressed_image=np.expand_dims(compressed_image,axis=0)
# print(image_list.shape,BumpTime.shape,compressed_image.shape)
cs = np.vstack((image_list,BumpTime,compressed_image))
compressed.append(cs)
vid=[]
else:
ret, frame = cap.read()
vid.append(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (w, h)))
video = np.asarray(compressed)
np.savez('/home/sathya/Desktop/cs_dataset/'+"vid.npz",video)
import matplotlib.pyplot as plt
plt.imshow(video[0,0,:,:])
plt.show()
plt.imshow(video[0,-1,:,:])
plt.show()
| 34.983871
| 113
| 0.65468
|
5e4faf9b779e306cbcbd689a82767a28fe5ab7cf
| 1,613
|
py
|
Python
|
cin/visualizer.py
|
phv2312/CenterNet
|
8b2a633e5951527ac0f72dfb3dab940f202b747c
|
[
"MIT"
] | null | null | null |
cin/visualizer.py
|
phv2312/CenterNet
|
8b2a633e5951527ac0f72dfb3dab940f202b747c
|
[
"MIT"
] | null | null | null |
cin/visualizer.py
|
phv2312/CenterNet
|
8b2a633e5951527ac0f72dfb3dab940f202b747c
|
[
"MIT"
] | null | null | null |
import json
from pycocotools.coco import COCO
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import cv2
image_directory = "/home/kan/Desktop/cinnamon/zalo/CenterNet/data/coco_zalo/images/"
lbl_path = "/home/kan/Desktop/cinnamon/zalo/CenterNet/data/coco_zalo/train.json"
json_data = json.load(open(lbl_path, 'r'))
example_coco = COCO(lbl_path)
categories = example_coco.loadCats(example_coco.getCatIds())
category_names = [category['name'] for category in categories]
print('Custom COCO categories: \n{}\n'.format(' '.join(category_names)))
category_names = set([category['supercategory'] for category in categories])
print('Custom COCO supercategories: \n{}'.format(' '.join(category_names)))
category_ids = []
image_ids = example_coco.getImgIds(catIds=category_ids)
while(True):
image_data = example_coco.loadImgs(image_ids[np.random.randint(0, len(image_ids))])[0]
# load and display instance annotations
image = io.imread(image_directory + image_data['file_name'])
# plt.imshow(image)
# plt.show()
pylab.rcParams['figure.figsize'] = (8.0, 10.0)
annotation_ids = example_coco.getAnnIds(imgIds=image_data['id'], catIds=category_ids, iscrowd=None)
annotations = example_coco.loadAnns(annotation_ids)
for annot in annotations:
x_min, y_min, w, h = annot['bbox']
x_max = x_min + w
y_max = y_min + h
print(annot)
        cv2.rectangle(image, (int(x_min), int(y_min)), (int(x_max), int(y_max)), (255, 0, 0), thickness=1)
#example_coco.showAnns(annotations, draw_bbox=True)
plt.imshow(image)
plt.show()
| 31.627451
| 103
| 0.720397
|
8f84d7330034f84613ecafb3254ab2822c11ebc6
| 1,168
|
py
|
Python
|
tests/run.py
|
seattleopendata/scrubadub
|
00522458640d1ba6eddf5b2772ebd0bbf62cb4e2
|
[
"MIT"
] | 3
|
2019-04-14T04:13:40.000Z
|
2020-04-22T05:10:28.000Z
|
tests/run.py
|
seattleopendata/scrubadub
|
00522458640d1ba6eddf5b2772ebd0bbf62cb4e2
|
[
"MIT"
] | null | null | null |
tests/run.py
|
seattleopendata/scrubadub
|
00522458640d1ba6eddf5b2772ebd0bbf62cb4e2
|
[
"MIT"
] | 3
|
2020-04-18T15:25:33.000Z
|
2021-06-12T02:58:01.000Z
|
#!/usr/bin/env python
"""Run the test suite that is specified in the .travis.yml file
"""
import os
import subprocess
import yaml
from colors import green, red
try:
unicode
except NameError:
basestring = str # Compatibility for Python 2 and 3
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def run_test(command):
wrapped_command = "cd %s && %s" % (root_dir, command)
pipe = subprocess.Popen(
wrapped_command, shell=True,
)
pipe.wait()
if pipe.returncode == 0:
print(green("TEST PASSED"))
else:
print(red("TEST FAILED"))
return pipe.returncode
# load the script tests from the .travis.yml file
with open(os.path.join(root_dir, '.travis.yml')) as stream:
    travis_yml = yaml.load_all(stream.read(), Loader=yaml.SafeLoader)
config = next(travis_yml)
tests = config['script']
# run the tests
if isinstance(tests, basestring):
returncode = run_test(tests)
elif isinstance(tests, (list, tuple)):
returncode = 0
for test in tests:
returncode += run_test(test)
if returncode == 0:
print(green("ALL TESTS PASSED"))
else:
print(red("SOME TESTS FAILED, SEE ABOVE"))
| 22.901961
| 70
| 0.668664
|
c97cb08bc3ba5f715c5f1e5ab28b61678f24298a
| 10,636
|
py
|
Python
|
test/frontend/TopTenStudentsTest.py
|
drbobdugan/smoss
|
3232ddfbb89450143a0fbca54c9be75730e3b3ec
|
[
"MIT"
] | null | null | null |
test/frontend/TopTenStudentsTest.py
|
drbobdugan/smoss
|
3232ddfbb89450143a0fbca54c9be75730e3b3ec
|
[
"MIT"
] | 3
|
2018-04-15T16:34:00.000Z
|
2018-04-15T16:48:43.000Z
|
test/frontend/TopTenStudentsTest.py
|
CSC400-S18/smoss
|
3232ddfbb89450143a0fbca54c9be75730e3b3ec
|
[
"MIT"
] | 1
|
2019-02-21T02:27:40.000Z
|
2019-02-21T02:27:40.000Z
|
#!/usr/bin/env python3.6
#
# FILE: TopTenStudentsTest.py
# AUTHOR: wgreelish
# DATE: 9 APR 2018
#
# DESCRIPTION:
#     This suite runs all of the UATs from Story #154706475.
#
import test.frontend.FrontEndConfig as FrontEndConfig
import Config
class TopTenStudentsTest (FrontEndConfig.FrontEndTestSuite):
#
# setUp (): Specifies the build name for the test suite
#
def setUp (self):
FrontEndConfig.FrontEndTestSuite.setUp (self)
self.buildName = "154706475 - Top Ten Students"
#
    # ExpiredURLSubmission (): Enters an expired URL into the text area and submits it
#
def test_ExpiredURLSubmission (self):
driver = self.InitializeBrowserStackConnection ("Test an Expired URL")
# Navigate to page
driver.get ("http://localhost:5000/")
# Find textarea and input expired URL
driver.find_element_by_name ("text").click ()
driver.find_element_by_name ("text").send_keys ("http://moss.stanford.edu/results/388411051")
# Submit form data
driver.find_element_by_xpath ("//input[@value='Submit']").click ()
# Assertion
self.assertEqual ("Uh Oh!", driver.find_element_by_xpath("//h1").text)
#
    # InvalidURLSubmission (): Enters an invalid URL into the text area and submits it
#
def test_InvalidURLSubmission (self):
driver = self.InitializeBrowserStackConnection ("Test an Invalid URL")
# Navigate to page
driver.get ("http://localhost:5000/")
# Find textarea and input expired URL
driver.find_element_by_name ("text").click ()
driver.find_element_by_name ("text").send_keys ("http://moss.stanford.edu/results/xyz")
# Submit form data
driver.find_element_by_xpath ("//input[@value='Submit']").click ()
# Assertion
self.assertEqual ("Uh Oh!", driver.find_element_by_xpath("//h1").text)
'''
def test_MultipleURLsTopTenLinesMultipleAssignments(self):
driver = self.InitializeBrowserStackConnection("Test Top Ten Lines Matched Multiples Assignments")
# Navigate to page
driver.get("http://localhost:5000/")
# Find textarea and input Single URLs
driver.find_element_by_name("text").click()
driver.find_element_by_name("text").clear()
driver.find_element_by_name("text").send_keys(self.testURL)
# Submit form data
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Select the sixth item in the list of submitted URLs and submit form data again
driver.find_element_by_xpath("(//input[@name='selection'])[2]").click()
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Store the values of the top ten lines matched
var1 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr/td[2]").text
var2 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[2]/td[2]").text
var3 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[3]/td[2]").text
var4 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[4]/td[2]").text
var5 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[5]/td[2]").text
var6 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[6]/td[2]").text
var7 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[7]/td[2]").text
var8 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[8]/td[2]").text
var9 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[9]/td[2]").text
var10 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[10]/td[2]").text
# Assertion
self.assertTrue(var1 >= var2 >= var3 >= var4 >= var5 >= var6 >= var7 >= var8 >= var9 >= var10)
'''
def test_MultipleURLsTopTenPercentsMultipleAssignments(self):
driver = self.InitializeBrowserStackConnection("Test Top Ten Percent Matched Multiple Assignments")
# Navigate to page
driver.get("http://localhost:5000/")
# Find textarea and input Single URLs
driver.find_element_by_name("text").click()
driver.find_element_by_name("text").clear()
driver.find_element_by_name("text").send_keys(self.testURL)
# Submit form data
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Select the sixth item in the list of submitted URLs and submit form data again
driver.find_element_by_xpath("(//input[@name='selection'])[2]").click()
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Store the values of the top ten lines matched
var1 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr/td[2]").text
var2 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[2]/td[2]").text
var3 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[3]/td[2]").text
var4 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[4]/td[2]").text
var5 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[5]/td[2]").text
var6 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[6]/td[2]").text
var7 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[7]/td[2]").text
var8 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[8]/td[2]").text
var9 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[9]/td[2]").text
var10 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[10]/td[2]").text
# Assertion
self.assertTrue(var1 >= var2 >= var3 >= var4 >= var5 >= var6 >= var7 >= var8 >= var9 >= var10)
'''
def test_MultipleURLsTopTenLinesSingleAssignment(self):
driver = self.InitializeBrowserStackConnection("Test Top Ten Lines Matched Single Assignment")
driver.get("http://localhost:5000/")
driver.find_element_by_name("text").click()
driver.find_element_by_name("text").clear()
driver.find_element_by_name("text").send_keys("http://moss.stanford.edu/results/47342166")
driver.find_element_by_xpath("//input[@value='Submit']").click()
driver.find_element_by_xpath("(//input[@name='selection'])[2]").click()
driver.find_element_by_xpath("//input[@value='Submit']").click()
var1 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr/td[2]").text
var2 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[2]/td[2]").text
var3 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[3]/td[2]").text
var4 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[4]/td[2]").text
var5 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[5]/td[2]").text
var6 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[6]/td[2]").text
var7 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[7]/td[2]").text
var8 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[8]/td[2]").text
var9 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[9]/td[2]").text
var10 = driver.find_element_by_xpath("//table[@id='HighestLinesMatched']/tbody/tr[10]/td[2]").text
# Assertion
self.assertTrue(var1 >= var2 >= var3 >= var4 >= var5 >= var6 >= var7 >= var8 >= var9 >= var10)
'''
def test_MultipleURLsTopTenPercentSingleAssignment(self):
driver = self.InitializeBrowserStackConnection("Test Top Ten Percent Matched Single Assignment")
# Navigate to page
driver.get("http://localhost:5000/")
# Find textarea and input Single URLs
driver.find_element_by_name("text").click()
driver.find_element_by_name("text").clear()
driver.find_element_by_name("text").send_keys(self.testURL)
# Submit form data
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Select the sixth item in the list of submitted URLs and submit form data again
driver.find_element_by_xpath("(//input[@name='selection'])[2]").click()
driver.find_element_by_xpath("//input[@value='Submit']").click()
# Store the values of the top ten lines matched
var1 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr/td[2]").text
var2 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[2]/td[2]").text
var3 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[3]/td[2]").text
var4 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[4]/td[2]").text
var5 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[5]/td[2]").text
var6 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[6]/td[2]").text
var7 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[7]/td[2]").text
var8 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[8]/td[2]").text
var9 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[9]/td[2]").text
var10 = driver.find_element_by_xpath("//table[@id='HighestPercentMatched']/tbody/tr[10]/td[2]").text
        # Assertion: compare the ten percentage values numerically rather than as raw strings
        values = [float(v.strip().rstrip('%').replace(',', '')) for v in (var1, var2, var3, var4, var5, var6, var7, var8, var9, var10)]
        self.assertTrue(all(a >= b for a, b in zip(values, values[1:])))
def test_MultipleURLFormSubmission(self):
driver = self.InitializeBrowserStackConnection("Test Multiple URL Form Submission")
driver = self.driver
driver.get("http://localhost:5000/")
driver.find_element_by_name("text").click()
driver.find_element_by_name("text").clear()
driver.find_element_by_name("text").send_keys('\n'.join(self.testURLGroup))
driver.find_element_by_xpath("//input[@value='Submit']").click()
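# These UI checks assume the results app is served locally at http://localhost:5000 and that
# InitializeBrowserStackConnection (presumably defined earlier in this test class) returns a remote
# BrowserStack WebDriver session named after the scenario being exercised.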
| 54.54359
| 109
| 0.658236
|
b016a927306ec9de84174d86515e4241f11f5204
| 2,056
|
py
|
Python
|
Controller/storage/models/LinearRegressor/LinearRegressor.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | null | null | null |
Controller/storage/models/LinearRegressor/LinearRegressor.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | 7
|
2020-07-19T03:29:21.000Z
|
2022-03-02T06:46:12.000Z
|
Controller/storage/models/LinearRegressor/LinearRegressor.py
|
th-nuernberg/ml-cloud
|
6d7527cbf6cceb7062e74dbc43d51998381aa6c8
|
[
"MIT"
] | null | null | null |
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.optimizers import Adam
from Algorithm import Algorithm
from KerasCallback import StatusCallback
class LinearRegressor(Algorithm):
def __init__(self, data, labels, config, job_id, api):
self.__data = data
self.__labels = labels
self.__config = config['parameters']
self.__arch_config_id = config['architecture_config_id']
self.__job_id = job_id
self.__api = api
n_features = data.shape[1]
mean = self.__data.mean(axis=0)
self.__data -= mean
std = self.__data.std(axis=0)
self.__data /= std
# self.__model = Sequential()
# self.__model.add(layer=Dense(units=1, input_dim=n_features, activation='linear'))
self.__model = Sequential()
self.__model.add(Dense(64, activation='relu', input_dim=n_features))
self.__model.add(Dense(64, activation='relu'))
self.__model.add(Dense(1))
self.__model.compile(optimizer='rmsprop',
loss='mse',
metrics=['mae', 'acc'])
def fit(self):
self.__model.fit(x=self.__data,
y=self.__labels,
batch_size=self.__config['batch_size'],
epochs=self.__config['epochs'],
validation_split=self.__config['validation_split'],
shuffle=self.__config['shuffle'],
callbacks=[StatusCallback(api=self.__api, job_id=self.__job_id)])
def evaluate(self, data, labels):
self.__model.evaluate(x=data, y=labels)
def predict(self, data):
return self.__model.predict(x=data)
def save(self, filepath):
self.__model.save(filepath=filepath + '/model.h5')
def load(self, filepath):
self.__model = load_model(filepath=filepath)
def check_data(self, data, labels):
pass
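# A minimal usage sketch of the class above, assuming a numeric feature matrix `data`, a label
# vector `labels`, a config dict shaped like {'parameters': {'batch_size': 32, 'epochs': 10,
# 'validation_split': 0.2, 'shuffle': True}, 'architecture_config_id': 1}, and `job_id`/`api`
# objects accepted by StatusCallback; the concrete values here are illustrative only:
#
#   model = LinearRegressor(data, labels, config, job_id, api)
#   model.fit()
#   model.save('/tmp/linreg')            # writes /tmp/linreg/model.h5
#   predictions = model.predict(data)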
| 31.151515
| 91
| 0.608463
|
d10117e72d76c93138dcb83f50e31acc23720837
| 1,754
|
py
|
Python
|
server/HonorificsConvert.py
|
YoshiharuSenna-ucl/C_2002
|
676a137ae30804001b8119a08ba554471c765d3d
|
[
"MIT"
] | null | null | null |
server/HonorificsConvert.py
|
YoshiharuSenna-ucl/C_2002
|
676a137ae30804001b8119a08ba554471c765d3d
|
[
"MIT"
] | null | null | null |
server/HonorificsConvert.py
|
YoshiharuSenna-ucl/C_2002
|
676a137ae30804001b8119a08ba554471c765d3d
|
[
"MIT"
] | null | null | null |
from goolabs import GoolabsAPI
import json
# Obtain the API client
app_id = "9707a9ca41154956524fe5ef01ba774b4305ccc701adfb6be574a87ba4a5687b"
api = GoolabsAPI(app_id)
# Original text data
f = open('before.txt', 'r', encoding='UTF-8')
data = f.read()
OriginalText = data
# Word search function
def SearchForWords(sentence):
for start in range(len(sentence)):
for end in range(len(sentence) - 1, start - 1, -1):
testKey = ''
for check in range(start, end + 1):
testKey += sentence[check][0]
if testKey in HumbleLangDict:
if testKey not in HitWordList:
HitWordList.append(testKey)
# Word replacement function
def ChangeWord(text, HitWordList):
ConvertedText = text
for word in HitWordList:
ConvertedText = ConvertedText.replace(word, HumbleLangDict[word])
return ConvertedText
# Honorific conversion function
def ChangeToHonorific(text):
    # Load the dictionary data
json_open = open('sample.json', 'r')
global HumbleLangDict
HumbleLangDict = json.load(json_open)
print(json.dumps(HumbleLangDict, indent=2).encode().decode('unicode-escape'))
global HitWordList
HitWordList = []
# See sample response below.
response = api.morph(sentence = text)
    # Convert sentence by sentence
for sentence in response['word_list']:
SearchForWords(sentence)
print(HitWordList)
ConvertedText = ChangeWord(text, HitWordList)
return ConvertedText
# Call the honorific conversion function
ChangeText = ChangeToHonorific(OriginalText)
print(ChangeText)
print(OriginalText)
f.close()
# Write the result to a file
path_w = 'result.txt'
with open(path_w, mode='w') as f:
f.write(ChangeText)
f.close()
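# The script assumes sample.json maps plain expressions to their honorific/humble replacements,
# e.g. a structure along these lines (entries shown only for illustration, not the real file):
#
#   {
#       "言う": "申し上げる",
#       "見る": "拝見する"
#   }
#
# SearchForWords() collects every key found in a morph-analysed sentence and ChangeWord() then
# substitutes each hit in the original text with str.replace().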
| 28.290323
| 84
| 0.624287
|
04d39a13cf7fd288423ed6cb0f96c4a6b88155f0
| 210
|
py
|
Python
|
atest/testdata/standard_libraries/remote/timeouts.py
|
rdagum/robotframework
|
b7069d505374e9f09a140ed5a9727d2a40716446
|
[
"ECL-2.0",
"Apache-2.0"
] | 7,073
|
2015-01-01T17:19:16.000Z
|
2022-03-31T22:01:29.000Z
|
atest/testdata/standard_libraries/remote/timeouts.py
|
imust6226/robotframework
|
08c56fef2ebc64d682c7f99acd77c480d8d0e028
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,412
|
2015-01-02T09:29:05.000Z
|
2022-03-31T13:10:46.000Z
|
atest/testdata/standard_libraries/remote/timeouts.py
|
3mdeb/robotframework
|
6006ce0b3d5fc6b45c5eb040dc859acd64bfa846
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,298
|
2015-01-03T02:47:15.000Z
|
2022-03-31T02:00:16.000Z
|
import sys
import time
from remoteserver import RemoteServer
class Timeouts:
def sleep(self, secs):
time.sleep(int(secs))
if __name__ == '__main__':
RemoteServer(Timeouts(), *sys.argv[1:])
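# Any extra command line arguments are handed straight to RemoteServer, which in the acceptance
# tests is normally just the port to listen on, e.g. `python timeouts.py 8270` (the port number
# here is only an illustration).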
| 15
| 43
| 0.685714
|
b98de44e0912033e1dc635ac311c679e3a903daa
| 7,922
|
py
|
Python
|
src/main_regression.py
|
Jiangtong-Li/ZHSIR
|
fd2c0a7e79f22cbf565ccd5e13342f1b317ac9b7
|
[
"Apache-2.0"
] | 8
|
2019-09-29T02:29:16.000Z
|
2020-12-01T13:48:01.000Z
|
src/main_regression.py
|
Jiangtong-Li/ZHSIR
|
fd2c0a7e79f22cbf565ccd5e13342f1b317ac9b7
|
[
"Apache-2.0"
] | null | null | null |
src/main_regression.py
|
Jiangtong-Li/ZHSIR
|
fd2c0a7e79f22cbf565ccd5e13342f1b317ac9b7
|
[
"Apache-2.0"
] | 1
|
2021-02-13T09:27:05.000Z
|
2021-02-13T09:27:05.000Z
|
import os
import random
import time
import numpy as np
from scipy.spatial.distance import cdist
import cv2
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.optim import Adam, SGD
from torch.utils.tensorboard import SummaryWriter
from package.model.regression import Regressor
from package.loss.regularization import _Regularization
from package.dataset.data_cmd_translate import CMDTrans_data
from package.args.cvae_args import parse_config
from package.dataset.utils import make_logger
from package import cal_matrics_single
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
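# update_lr is called inside train() to implement linear warmup: while global_step <= warmup_steps
# the effective rate is lr * global_step / warmup_steps, so with an illustrative lr=1e-4 and
# warmup_steps=1000, step 100 trains at 1e-5 and the full 1e-4 is reached at step 1000.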
def train(args):
writer = SummaryWriter()
logger = make_logger(args.log_file)
if args.zs:
packed = args.packed_pkl_zs
else:
packed = args.packed_pkl_nozs
logger.info('Loading the data ...')
data = CMDTrans_data(args.sketch_dir, args.image_dir, args.stats_file, args.embedding_file,
packed, args.preprocess_data, args.raw_data, zs=args.zs, sample_time=1,
cvae=True, paired=False, cut_part=False)
dataloader_train = DataLoader(dataset=data, num_workers=args.num_worker, \
batch_size=args.batch_size,
shuffle=args.shuffle)
logger.info('Training sketch size: {}'.format(len(data.path2class_sketch.keys())))
logger.info('Training image size: {}'.format(len(data.path2class_image.keys())))
logger.info('Testing sketch size: {}'.format(len(data.path2class_sketch_test.keys())))
logger.info('Testing image size: {}'.format(len(data.path2class_image_test.keys())))
logger.info('Building the model ...')
model = Regressor(args.raw_size, args.hidden_size, dropout_prob=args.dropout, logger=logger)
logger.info('Building the optimizer ...')
optimizer = Adam(params=model.parameters(), lr=args.lr, betas=(0.5, 0.999))
l1_regularization = _Regularization(model, args.l1_weight, p=1, logger=logger)
l2_regularization = _Regularization(model, args.l2_weight, p=2, logger=logger)
if args.start_from is not None:
logger.info('Loading pretrained model from {} ...'.format(args.start_from))
ckpt = torch.load(args.start_from, map_location='cpu')
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
if args.gpu_id != -1:
model.cuda(args.gpu_id)
optimizer.zero_grad()
loss_tri_acm = 0.
loss_l1_acm = 0.
loss_l2_acm = 0.
batch_acm = 0
global_step = 0
best_precision = 0.
best_iter = 0
patience = args.patience
logger.info('Hyper-Parameter:')
logger.info(args)
logger.info('Model Structure:')
logger.info(model)
logger.info('Begin Training !')
while True:
if patience <= 0:
break
for sketch_batch, image_p_batch, image_n_batch, _semantics_batch in dataloader_train:
sketch_batch = sketch_batch.float()
image_p_batch = image_p_batch.float()
image_n_batch = image_n_batch.float()
            if global_step and global_step % args.print_every == 0 and batch_acm % args.cum_num == 0:
logger.info('*** Iter {} ***'.format(global_step))
logger.info(' Loss/Triplet {:.3}'.format(loss_tri_acm/args.print_every/args.cum_num))
logger.info(' Loss/L1 {:.3}'.format(loss_l1_acm/args.print_every/args.cum_num))
logger.info(' Loss/L2 {:.3}'.format(loss_l2_acm/args.print_every/args.cum_num))
loss_tri_acm = 0.
loss_l1_acm = 0.
loss_l2_acm = 0.
            if global_step and global_step % args.save_every == 0 and batch_acm % args.cum_num == 0:
if not os.path.exists(args.save_dir):
os.mkdir(args.save_dir)
torch.save({'args':args, 'model':model.state_dict(), 'optimizer':optimizer.state_dict()},
'{}/Iter_{}.pkl'.format(args.save_dir,global_step))
### Evaluation
model.eval()
image_label = list()
image_feature = list()
for image, label in data.load_test_images(batch_size=args.batch_size):
image = image.float()
if args.gpu_id != -1:
image = image.cuda(args.gpu_id)
image_label += label
tmp_feature = model.inference_image(image).cpu().detach().numpy()
image_feature.append(tmp_feature)
image_feature = np.vstack(image_feature)
sketch_label = list()
sketch_feature = list()
for sketch, label in data.load_test_sketch(batch_size=args.batch_size):
sketch = sketch.float()
if args.gpu_id != -1:
sketch = sketch.cuda(args.gpu_id)
sketch_label += label
tmp_feature = model.inference_sketch(sketch).cpu().detach().numpy()
sketch_feature.append(tmp_feature)
sketch_feature = np.vstack(sketch_feature)
Precision, mAP, = cal_matrics_single(image_feature, image_label, sketch_feature, sketch_label)
writer.add_scalar('Precision_200/cosine', Precision, global_step)
writer.add_scalar('mAP_200/cosine', mAP, global_step)
logger.info('*** Evaluation Iter {} ***'.format(global_step))
logger.info(' Precision {:.3}'.format(Precision))
logger.info(' mAP {:.3}'.format(mAP))
if best_precision < Precision:
patience = args.patience
best_precision = Precision
best_iter = global_step
writer.add_scalar('Best/Precision_200', best_precision, best_iter)
logger.info('Iter {}, Best Precision_200 {:.3}'.format(global_step, best_precision))
torch.save({'args':args, 'model':model.state_dict(), \
'optimizer':optimizer.state_dict()}, '{}/Best.pkl'.format(args.save_dir))
else:
patience -= 1
if patience <= 0:
break
model.train()
batch_acm += 1
if global_step <= args.warmup_steps:
update_lr(optimizer, args.lr*global_step/args.warmup_steps)
if args.gpu_id != -1:
sketch_batch = sketch_batch.cuda(args.gpu_id)
image_p_batch = image_p_batch.cuda(args.gpu_id)
image_n_batch = image_n_batch.cuda(args.gpu_id)
loss = model(sketch_batch, image_p_batch, image_n_batch)
loss_l1 = l1_regularization()
loss_l2 = l2_regularization()
loss_tri = loss.item()
loss_l1_acm += (loss_l1.item() / args.l1_weight)
loss_l2_acm += (loss_l2.item() / args.l2_weight)
loss_tri_acm += loss_tri
writer.add_scalar('Loss/Triplet', loss_tri, global_step)
writer.add_scalar('Loss/Reg_l1', (loss_l1.item() / args.l1_weight), global_step)
writer.add_scalar('Loss/Reg_l2', (loss_l2.item() / args.l2_weight), global_step)
loss_ = 0
loss_ += loss
loss_.backward()
if batch_acm % args.cum_num == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
global_step += 1
optimizer.zero_grad()
if __name__ == '__main__':
args = parse_config()
train(args)
| 43.054348
| 120
| 0.603888
|
bbe071b26b1869a44f3ea342ed537d69dfa78cb0
| 423
|
py
|
Python
|
New_Home_Django/asgi.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | null | null | null |
New_Home_Django/asgi.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | 7
|
2020-04-18T04:54:05.000Z
|
2020-04-29T14:49:46.000Z
|
New_Home_Django/asgi.py
|
Ymirrp/Home-page
|
6ac9b5b76cc2b08298086c7e784685dad802c9d6
|
[
"MIT"
] | null | null | null |
"""
ASGI config for New_Home_Django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'New_Home_Django.settings')
application = get_asgi_application()
| 24.882353
| 79
| 0.763593
|
d6fd6bdde757e08f6ca80aa15925b0a6c6015195
| 2,033
|
py
|
Python
|
Inheritance.py
|
ShreyashSalian/Python-Multiple-Inheritance
|
d152d12829ea438ce1725fd0feddcb5ae8343ca7
|
[
"MIT"
] | null | null | null |
Inheritance.py
|
ShreyashSalian/Python-Multiple-Inheritance
|
d152d12829ea438ce1725fd0feddcb5ae8343ca7
|
[
"MIT"
] | null | null | null |
Inheritance.py
|
ShreyashSalian/Python-Multiple-Inheritance
|
d152d12829ea438ce1725fd0feddcb5ae8343ca7
|
[
"MIT"
] | null | null | null |
class Student:
StudentCount = 0
def __init__(self,StudentId = 0,StudentName = "",StudentPhone =""):
self.StudentId = StudentId
self.StudentName = StudentName
self.StudentPhone = StudentPhone
Student.StudentCount += 1
def showCount(self):
print("Total instances of Student is:",Student.StudentCount)
def showData(self):
print("Student Id is",self.StudentId)
print("Student Name is", self.StudentName)
print("Student Phone is", self.StudentPhone)
def setData(self,StudentId = 0,StudentName = "",StudentPhone =""):
self.StudentId = StudentId
self.StudentName = StudentName
self.StudentPhone = StudentPhone
#Student.StudentCount += 1
class Science:
def __init__(self,Physics = 0.0,Chemistry=0.0):
self.Physics = Physics
self.Chemistry = Chemistry
def showData(self):
print("Physics Marks is : ",self.Physics)
print("Chemistry Marks is :",self.Chemistry)
def setData(self,Physics = 0.0,Chemistry=0.0):
self.Physics = Physics
self.Chemistry = Chemistry
class Results(Student,Science):
    def __init__(self,StudentId = 0,StudentName = "",StudentPhone = "",Physics = 0.0,Chemistry = 0.0):
        Student.__init__(self,StudentId,StudentName,StudentPhone)
        Science.__init__(self,Physics,Chemistry)
        self.total = Physics + Chemistry
self.percentage = self.total/200 * 100
def setData(self,StudentId = 0,StudentName = "",StudentPhone ="",Physics = 0.0,Chemistry = 0.0):
Student.__init__(self, StudentId, StudentName, StudentPhone)
Science.__init__(self, Physics, Chemistry)
self.total = Physics + Chemistry
self.percentage = self.total / 200 * 100
def showData(self):
Student.showData(self)
Science.showData(self)
print("Total Marks :",self.total)
print("Percentage :",self.percentage)
a = Results(1,"Shreyash","344534334",89.9,90.6)
a.showData()
a.showCount()
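# Method resolution order for the classes above is (Results, Student, Science, object), so without
# the override a plain showData() call would only reach Student.showData(); Results.showData()
# therefore invokes Student.showData(self) and Science.showData(self) explicitly so that both the
# personal details and the marks are printed before the totals.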
| 33.883333
| 102
| 0.65273
|
34f5e09c69ab414686add2d5468af4495a9732ee
| 10,622
|
py
|
Python
|
configs/scene_graph/VG_SgDet_transformer_mask_X_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 24
|
2021-10-14T03:28:28.000Z
|
2022-03-29T09:30:04.000Z
|
configs/scene_graph/VG_SgDet_transformer_mask_X_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-12-14T15:04:49.000Z
|
2022-02-19T09:54:42.000Z
|
configs/scene_graph/VG_SgDet_transformer_mask_X_rcnn_x101_64x4d_fpn_1x.py
|
yizhe-ang/MMSceneGraph
|
d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba
|
[
"MIT"
] | 4
|
2021-10-31T11:23:06.000Z
|
2021-12-17T06:38:50.000Z
|
# dataset settings
dataset_type = 'VisualGenomeDataset'
data_root = 'data/visualgenome/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
# Since the forward process may need gt info, annos must be loaded.
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# NOTE: Do not change the img to DC.
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
roidb_file=data_root + 'VG-SGG-with-attri.h5',
dict_file=data_root + 'VG-SGG-dicts-with-attri.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=train_pipeline,
num_im=-1,
num_val_im=5000,
split='train',
img_prefix=data_root + 'Images/'),
val=dict(
type=dataset_type,
roidb_file=data_root + 'VG-SGG-with-attri.h5',
dict_file=data_root + 'VG-SGG-dicts-with-attri.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
num_val_im=5000,
split='val',
img_prefix=data_root + 'Images/'),
test=dict(
type=dataset_type,
roidb_file=data_root + 'VG-SGG-with-attri.h5',
dict_file=data_root + 'VG-SGG-dicts-with-attri.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'Images/'))
# model settings
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VG_statistics.cache'))
model = dict(
type='MaskRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='ExtrDetWeightSharedFCBBoxHead',
num_fcs=2,
extract_type='concat',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=151,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='TransferMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=151,
transfer_cfg=dict(num_fc=2, fc_in=5120, hidden_neurons=[1024, 256], relu='LeakyReLU', mlp_fusion=True),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
relation_head=dict(
type='TransformerHead',
dataset_config=dataset_config,
num_classes=151,
num_predicates=51,
use_bias=True,
head_config=dict(
use_gt_box=False,
use_gt_label=False,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.1,
context_object_layer=4,
context_edge_layer=2,
num_head=8,
inner_dim=1024,
k_dim=64,
v_dim=64,
glove_dir='data/glove/',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
            require_overlap=False, # for sgdet training, overlap is not required
num_sample_per_gt_rel=4,
num_rel_per_image=1024,
pos_fraction=0.25,
test_overlap=True # for testing
),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50, # Follow the setting in TDE, 80 Bboxes are selected.
mask_thr_binary=0.5,
rle_mask_encode=False, # do not transform the mask into rle.
crop_mask=True, # so that the mask shape is the same as bbox, instead of image shape
format_mask_result=False, # do not transform to the result format like bbox
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='sgdet', relation_mode=True, classwise=True)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'mask_head'])
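# With the freeze_modules list above, only the relation head appears to remain trainable in this
# run; the frozen detector weights are expected to come from the checkpoint referenced by
# load_from further below.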
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VG_SgDet_transformer_mask_X_rcnn_x101_64x4d_fpn_1x'
load_from = './experiments/VG_COCOremap_MASKTRANS_mask_rcnn_x101_64x4d_fpn_1x/latest.pth'
# load_mapping = dict(align_dict={'relation_head.bbox_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs',
# 'relation_head.relation_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs'})
resume_from = None
workflow = [('train', 1), ('val', 1)]
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| 36.627586
| 115
| 0.58473
|
1a217fa9fcfc7179fcc26505ada1bb22f8574827
| 1,677
|
py
|
Python
|
python/y_2015/day12.py
|
AJSKirk/aoc
|
68a66f0f37b532db6d0752bf04ed7698b7cd8a5a
|
[
"MIT"
] | 4
|
2020-12-21T03:12:01.000Z
|
2021-12-20T06:23:13.000Z
|
python/y_2015/day12.py
|
AJSKirk/aoc
|
68a66f0f37b532db6d0752bf04ed7698b7cd8a5a
|
[
"MIT"
] | null | null | null |
python/y_2015/day12.py
|
AJSKirk/aoc
|
68a66f0f37b532db6d0752bf04ed7698b7cd8a5a
|
[
"MIT"
] | 2
|
2021-12-12T15:28:55.000Z
|
2021-12-20T06:23:19.000Z
|
import sys
import re
import operator
from functools import reduce
import math
EXTRACT_REGEXP = r'(\[)|(\])|(\{)|(\})|(-*\d+)|(red)'
def neg_safe_cast(token):
"""Needed because an .isnumeric() check fails to pick up negatives"""
try:
return int(token)
except ValueError:
return 0
def total_numerics(tokens):
return reduce(operator.add, map(neg_safe_cast, tokens))
def no_red_sum(tokens):
"""Using import json is cheating, let's parse it ourselves in a sinlge pass. Hope you like stacks."""
sums = [0]
stack = []
is_red = False
for token in tokens:
if token == 'red' and not is_red and stack[-1] == '{':
is_red = True
sums[-1] = 0
stack.append('red')
elif token == '{':
sums.append(0)
stack.append('{')
elif token == '}':
last_sum = sums.pop()
sums[-1] += last_sum
if stack[-1] == 'red':
stack.pop()
is_red = False
stack.pop()
elif token == '[':
stack.append('[')
sums.append(0)
elif token == ']':
stack.pop()
last_sum = sums.pop()
sums[-1] += last_sum
elif not is_red:
sums[-1] += neg_safe_cast(token)
assert len(sums) == 1
return sums.pop()
def get_tokens(raw_json):
return map(lambda m: m.group(), re.finditer(EXTRACT_REGEXP, raw_json))
def main():
with open(sys.argv[1], 'r') as f:
tokens = get_tokens(f.read().strip())
#print(total_numerics(tokens))
print(no_red_sum(tokens))
if __name__ == "__main__":
main()
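# Worked example of the two counting rules on the document '[1,{"c":"red","b":2},3]':
#   total_numerics(get_tokens('[1,{"c":"red","b":2},3]'))  -> 6   (every number counts)
#   no_red_sum(get_tokens('[1,{"c":"red","b":2},3]'))      -> 4   (the object containing "red" is skipped)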
| 23.619718
| 105
| 0.53548
|
034a6c37d2c5885f8fcd8b4c6d0b8f3d855bd70a
| 475
|
py
|
Python
|
classification/migrations/0045_clinvarexport_release_status.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
classification/migrations/0045_clinvarexport_release_status.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
classification/migrations/0045_clinvarexport_release_status.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-08-02 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classification', '0044_auto_20210730_1252'),
]
operations = [
migrations.AddField(
model_name='clinvarexport',
name='release_status',
field=models.CharField(choices=[('R', 'Release When Ready'), ('H', 'On Hold')], default='R', max_length=1),
),
]
| 25
| 119
| 0.608421
|
45c5f081c090a86f0ff0ab30b362afc5d48d66b8
| 1,298
|
py
|
Python
|
HotelManagement/FoodService/views.py
|
sriram012/Hotel-Management
|
94a838895a69843ba0112e9a5ea5674362eb66a3
|
[
"Unlicense"
] | null | null | null |
HotelManagement/FoodService/views.py
|
sriram012/Hotel-Management
|
94a838895a69843ba0112e9a5ea5674362eb66a3
|
[
"Unlicense"
] | null | null | null |
HotelManagement/FoodService/views.py
|
sriram012/Hotel-Management
|
94a838895a69843ba0112e9a5ea5674362eb66a3
|
[
"Unlicense"
] | 1
|
2019-01-05T12:53:57.000Z
|
2019-01-05T12:53:57.000Z
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from RoomsManagement.models import *
from .models import *
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from django.contrib import messages
from . import decoraters
from Customers import decoraters as cust_dec
# Food management Login...
@cust_dec.user_not_logged_in
def food_management_login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
user_inst = User.objects.get(username=username)
if hasattr(user_inst, 'customers'):
if user.is_active:
login(request, user)
return redirect('customers:home')
else:
return HttpResponse('account not active')
else:
messages.error(request, f'Invalid Login details')
else:
messages.error(request, f'Invalid Login details')
return redirect(reverse('food_management:login'))
else:
return render(request, 'food_management/login.html')
| 32.45
| 65
| 0.664869
|
376442b97e87eea1bf724515b2dc881cc0cfecbf
| 1,262
|
py
|
Python
|
apps/i3wm/i3wm.py
|
ma-anwar/knausj_talon
|
c4cf4659ffba1abf76e99f99ec376cca04bb6291
|
[
"MIT"
] | null | null | null |
apps/i3wm/i3wm.py
|
ma-anwar/knausj_talon
|
c4cf4659ffba1abf76e99f99ec376cca04bb6291
|
[
"MIT"
] | null | null | null |
apps/i3wm/i3wm.py
|
ma-anwar/knausj_talon
|
c4cf4659ffba1abf76e99f99ec376cca04bb6291
|
[
"MIT"
] | null | null | null |
from talon import Context, Module, actions, settings, ui
mod = Module()
mod.tag("i3wm", desc="tag for loading i3wm related files")
mod.setting(
"i3_config_path",
type=str,
default="~/.i3/config",
desc="Where to find the configuration path",
)
mod.setting(
"i3_mod_key",
type=str,
default="super",
desc="The default key to use for i3wm commands",
)
ctx = Context()
ctx.matches = r"""
mode:command
"""
ctx.lists['self.i3applications'] = {
'firefox':'firefox',
'code':'code',
'telegram': 'telegram',
'licks':'lyx',
'files':'caja',
'screenshot':"mate-screenshot -a",
}
mod.list("i3applications", desc="applications")
@mod.capture(rule="{self.i3applications}")
def i3applications(m) -> str:
"Returns a string"
return m.i3applications
@mod.action_class
class Actions:
def i3wm_launch():
"""Trigger the i3 launcher: ex rofi"""
key = settings.get("user.i3_mod_key")
actions.key(f"{key}-d")
def i3wm_shell():
"""Launch a shell"""
key = settings.get("user.i3_mod_key")
actions.key(f"{key}-enter")
def i3wm_lock():
"""Trigger the lock screen"""
key = settings.get("user.i3_mod_key")
actions.key(f"{key}-shift-x")
| 22.945455
| 58
| 0.617274
|
aaa65b3ab6117c31bf474703c34b559bb6978e81
| 23,480
|
py
|
Python
|
src/modelSuite/exampleSanity.py
|
mirofedurco/PyAstronomy
|
b0e5806a18bde647654e6c9de323327803722864
|
[
"MIT"
] | 98
|
2015-01-01T12:46:05.000Z
|
2022-02-13T14:17:36.000Z
|
src/modelSuite/exampleSanity.py
|
mirofedurco/PyAstronomy
|
b0e5806a18bde647654e6c9de323327803722864
|
[
"MIT"
] | 46
|
2015-02-10T19:53:38.000Z
|
2022-01-11T17:26:05.000Z
|
src/modelSuite/exampleSanity.py
|
mirofedurco/PyAstronomy
|
b0e5806a18bde647654e6c9de323327803722864
|
[
"MIT"
] | 38
|
2015-01-08T17:00:34.000Z
|
2022-03-04T05:15:22.000Z
|
from __future__ import print_function, division
import unittest
import os
class ModSuiteSanity(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
# Clean up example output from KeplerEllipseModel example
if os.path.isfile("kemExample.tmp"):
os.remove("kemExample.tmp")
def sanity_rmcl_model(self):
""" Checking sanity of RmcL calculation (example) """
# Import some unrelated modules
from numpy import arange, pi
import matplotlib.pylab as plt
# ... and the model suite
from PyAstronomy import modelSuite as ms
# Create Rossiter-McLaughlin object
rmcl = ms.RmcL()
# Set parameters
rmcl.assignValue({"a": 6.7, "lambda": 7.2 / 180.0 * pi, "epsilon": 0.5,
"P": 1.74, "T0": 0.2, "i": 87.8 / 180. * pi,
"Is": 90.0 / 180.0 * pi, "Omega": 1.609e-5, "gamma": 0.2})
# Choose some time axis and calculate model
time = arange(100) / 100.0 * 0.2 + 0.1
rv = rmcl.evaluate(time)
# Let's see what happened...
plt.ylabel("Radial velocity [stellar-radii/s]")
plt.xlabel("Time [d]")
plt.plot(time, rv, '.')
# plt.show()
def sanity_rmcl_fit(self):
""" Checking sanity of RmcL fit (example) """
# Import some unrelated modules
from numpy import arange, pi, random
import matplotlib.pylab as plt
# ... and the model suite
from PyAstronomy import modelSuite as ms
# Create Rossiter-McLaughlin object
rmcl = ms.RmcL()
# Set parameters
rmcl.assignValue({"a": 6.7, "lambda": 7.2 / 180.0 * pi, "epsilon": 0.5,
"P": 1.74, "T0": 0.2, "i": 87.8 / 180. * pi,
"Is": 90.0 / 180.0 * pi, "Omega": 1.609e-5, "gamma": 0.2})
# Choose some time axis and calculate model
time = arange(100) / 100.0 * 0.2 + 0.1
rv = rmcl.evaluate(time)
# Add some noise.
rv += random.normal(0.0, 0.05 * rv.max(), rv.size)
# Assign guess parameters
rmcl.assignValue({"a": 6.0, "lambda": 7.2 / 180.0 * pi, "epsilon": 0.5,
"P": 1.74, "T0": 0.17, "i": 87.8 / 180. * pi,
"Is": 90.0 / 180.0 * pi, "Omega": 1.609e-5, "gamma": 0.2})
# Thaw parameters and fit
rmcl.thaw(["a", "T0"])
rmcl.fit(time, rv)
# Investigate the outcome
rmcl.parameterSummary()
# Let's see what happened...
plt.ylabel("Radial velocity [stellar-radii/s]")
plt.xlabel("Time [d]")
plt.plot(time, rv, '.')
plt.plot(time, rmcl.model, 'r--')
plt.legend(["Observation", "Model"])
# plt.show()
def sanity_rmclell_calc(self):
"""
Checking sanity of RmcLell (example)
"""
# Import some unrelated modules
from numpy import arange, pi
import matplotlib.pylab as plt
# ... and the model suite
from PyAstronomy import modelSuite as ms
# Create Rossiter-McLaughlin object (circular orbit)
rmcl = ms.RmcL()
# and one for an elliptical orbit
rmel = ms.RmcLell()
# Assign parameter values
rmcl.assignValue({"a":6.7, "lambda":7.2/180.0*pi, "epsilon":0.5, \
"P":1.74, "T0":0.2, "i":87.8/180.*pi, \
"Is":90.0/180.0*pi, "Omega":1.609e-5, "gamma":0.2})
rmel.assignValue({"a":6.7, "lambda":7.2/180.0*pi, "epsilon":0.5, \
"P":1.74, "tau":0.2, "i":87.8/180.*pi, "w":-90/180.*pi, \
"e":0.05, "Is":90.0/180.0*pi, "Omega":1.609e-5, "gamma":0.2})
# Choose some time axis and calculate model
time = arange(100)/100.0 * 0.2 + 0.1
rvc = rmcl.evaluate(time)
rve = rmel.evaluate(time)
# Let's see what happened...
# plt.ylabel("Radial velocity [stellar-radii/s]")
# plt.xlabel("Time [d]")
# plt.plot(time, rvc, 'b.-', label="circular")
# plt.plot(time, rve, 'r.-', label="elliptical")
# plt.legend()
# plt.show()
def sanity_rmcl_vs_rmclell(self):
""" Cross-checking Rmcl and RmcLell """
from numpy import arange, pi
import numpy as np
# ... and the model suite
from PyAstronomy import modelSuite as ms
# Create Rossiter-McLaughlin object
rmcl = ms.RmcL()
r2 = ms.RmcLell()
np.random.seed(9234667)
for i in range(10):
a = np.random.random()*5 + 3
l = np.random.random()*180 - 90
inc = np.random.random()*2 + 88
Omega = 1e-5 + np.random.random()*1e-5
# Set parameters
rmcl.assignValue({"a":a, "lambda":l/180.0*pi, "epsilon":0.5, \
"P":1.74, "T0":0.2, "i":inc/180.*pi, \
"Is":80.0/180.0*pi, "Omega":Omega, "gamma":0.2})
# Set parameters
r2.assignValue({"a":a, "lambda":l/180.0*pi, "epsilon":0.5, \
"P":1.74, "tau":0.2, "i":inc/180.*pi, \
"Is":80.0/180.0*pi, "Omega":Omega, "gamma":0.2,
"e":0.0, "w":-90/180*pi})
# Choose some time axis and calculate model
time = arange(20)/20.0 * 0.2 - 0.1 + rmcl["T0"]
rv = rmcl.evaluate(time)
rv2 = r2.evaluate(time)
d = np.max(np.abs(rv-rv2))
m = np.max(np.abs(rv))
self.assertAlmostEqual(d/m, 0.0, delta=1e-8, msg="Elliptical and circular orbit solution for RmcL and RmcLell do not match. " + \
str(r2.parameters()))
def sanity_SinRadVel(self):
# Import some unrelated modules
from numpy import arange, random, ones
import matplotlib.pylab as plt
# ... and now the radVel module
from PyAstronomy.modelSuite import radVel as rv
# Create Radial Velocity object
r = rv.SinRadVel()
# Set parameters
r.assignValue({"P": 1.8, "T0": 0.25, "K": 0.5, "rv0": 10.0})
# Choose some time axis and calculate model
time = arange(100) / 100.0 * 3.0 - 1.5
y = r.evaluate(time)
# Create some faked data by adding noise
rvData = y + random.normal(0.0, 0.05, y.size)
# Randomize starting parameters for fit
for p, v in r.parameters().items():
r[p] = v + (random.random() - 0.5) * v
# Show starting values
print("Starting values for fit:")
r.parameterSummary()
# Thaw all parameters
r.thaw(list(r.parameters().keys()))
# Start the fit
r.fit(time, rvData, yerr=ones(y.size) * 0.05)
# Show fit results
print("Fitted values:")
r.parameterSummary()
# Let's see what happened...
plt.ylabel("Radial velocity [km/s]")
plt.xlabel("Radial velocity [d]")
plt.errorbar(time, rvData, yerr=ones(y.size) * 0.05, fmt='b.')
plt.plot(time, y, 'r-')
# plt.show()
def sanity_KeplerEllipseModel(self):
from PyAstronomy.modelSuite import KeplerEllipseModel
import numpy as np
import matplotlib.pylab as plt
# Create a model class instance
# In this case, we are only interested
# in the x- and z-components of the orbit
# solution.
kem = KeplerEllipseModel(relevantAxes="xz")
# Setting some guess parameters
kem["a"] = 7.8
kem["per"] = 12.3
kem["e"] = 0.07
kem["tau"] = 0.745
kem["Omega"] = 143.
kem["w"] = 0.2
kem["i"] = 92.0
# Evaluate the model
time = np.linspace(0, kem["per"], 20)
model = kem.evaluate(time)
# Note that the model has twice the number of points
# compared to the time axis. This is because it contains
# the data for two axes
print("Used " + str(len(time)) + " time points")
print("-> length of model: ", len(model))
# Isolating the model for the x-axis, i.e.,
# every second data point starting from the
# beginning.
xmodel = model[0::2]
# Isolating the model for the y-axis
ymodel = model[1::2]
# Use the model to obtain mock data
# by introducing some scatter
data = model + np.random.normal(0., 0.5, model.size)
# Plot the resulting "data"
plt.title("Kepler Ellipse Model --- Example")
plt.errorbar(data[0::2], data[1::2], xerr=np.ones(20) * 0.5,
yerr=np.ones(20) * 0.5, fmt="bp")
# Use MCMC to sample from the posterior
# Specify free parameters
kem.thaw(["a", "per", "e", "tau", "Omega", "w", "i"])
# Specify starting values
X0 = {}
steps = {}
for p in kem.freeParameters():
X0[p] = kem[p]
steps[p] = kem[p] / 20.
lims = {"a": [5., 10.], "per": [10., 15.], "e": [0., 1.], "tau": [0.5, 1.],
"Omega": [0., 360.], "w": [-5., 5.], "i": [90., 95.]}
kem.fitMCMC(time, data, X0, lims, steps, yerr=np.ones(len(data)) * 0.5,
iter=500, dbfile="kemExample.tmp")
# Plot the lowest deviance model
ldmodel = kem.evaluate(np.linspace(0, kem["per"], 200))
plt.plot(ldmodel[0::2], ldmodel[1::2], 'r--')
# plt.show()
# def sanity_atanProfile(self):
# from PyAstronomy import modelSuite as ms
# import numpy as np
# import matplotlib.pylab as plt
#
# # Create an instance of the AtanProfile ...
# ap = ms.AtanProfile()
# # ... and define some starting values
# ap["A"] = 1.0
# ap["mu"] = 5.0
# ap["scale"] = 0.4
# ap["sig"] = 5.0
#
# # Plot profile on given x-axis
# x = np.linspace(-5,15,100)
# plt.plot(x, ap.evaluate(x), 'b.-')
#
# # Determine the locations of the inflection
# # points
# print "Inflection points: ", ap.inflectionPoints()
#
# # Create instance of damped profile and copy
# # the values from the first profile
# apd = ms.AtanProfileDamped()
# for p, v in ap.parameters().iteritems():
# apd[p] = v
#
# # Specify the additional damping parameter
# apd["tau"] = 2.0
# # and plot
# plt.plot(x, apd.evaluate(x), 'r.-')
# plt.show()
def sanity_lineListGaussModel(self):
"""
Checking example of line list Gauss model
"""
from PyAstronomy import modelSuite as ms
import numpy as np
import matplotlib.pylab as plt
        # Create our line list with 4 lines
lineList = np.zeros((4, 3))
# Assign wavelengths (in A)
lineList[0, 0] = 5002.37
lineList[1, 0] = 5005.9
lineList[2, 0] = 5007.52
lineList[3, 0] = 5007.64
# Assign EWs (in A)
lineList[0, 1] = 0.01
lineList[1, 1] = 0.05
lineList[2, 1] = 0.009
lineList[3, 1] = 0.12
# Assign depths (0-1)
lineList[0, 2] = 0.97
lineList[1, 2] = 0.9
lineList[2, 2] = 0.99
lineList[3, 2] = 0.35
wvl = np.arange(5000., 5010., 0.01)
# Get an instance of the LLGauss class
llg = ms.LLGauss(lineList)
# Have a look at the model parameters
llg.parameterSummary()
# Evaluate the model
m1 = llg.evaluate(wvl)
# Now apply rotational broadening [km/s]
# with limb-darkening of 0.6
llg["vsini"] = 61.0
llg["eps"] = 0.6
# and evaluate again
mvsini = llg.evaluate(wvl)
# Next, apply a Doppler shift [km/s]
llg["vrad"] = -32.7
# and evaluate
mvrad = llg.evaluate(wvl)
# Plot the results
plt.subplot(2, 1, 1)
plt.plot(wvl, m1, 'b.-')
plt.plot(wvl, mvsini, 'g.-')
plt.plot(wvl, mvrad, 'y.-')
# Now use the model for fitting
# We need "data" ...
data = llg.evaluate(wvl)
# ... with noise
data += np.random.normal(0.0, 0.01, len(data))
# Lets modify the strengths of the Gaussians
# and get it back.
for i in range(llg.numberOfLines()):
llg["A" + str(i + 1)] += np.random.normal(0.0, 0.1)
# Use all line strengths for fitting
llg.thawLineStrengths()
# and fit
llg.fit(wvl, data)
# Plot the result
plt.subplot(2, 1, 2)
plt.errorbar(wvl, data, yerr=np.ones(len(wvl)) * 0.01, fmt='bp')
plt.plot(wvl, llg.evaluate(wvl), 'r--')
# plt.show()
def sanity_VoigtAstroPExample(self):
"""
Sanity of VoigtAstroP example
"""
from PyAstronomy import modelSuite as ms
import numpy as np
import matplotlib.pylab as plt
# Obtain an object of type VoigtAstroP ...
v = ms.VoigtAstroP()
# ... and set some parameters
v["b"] = 87.7
v["f"] = 0.5
v["w0"] = 1214.0
# Damping constant [cm]
v["gamma"] = 2e-9
# Generate wavelength axis ...
wvl = np.linspace(1212., 1216., 200)
# ... and evaluate model
m = v.evaluate(wvl)
# Plot result
plt.plot(wvl, m, 'b.-')
# plt.show()
def sanity_VoigtAstroP_R_Example(self):
"""
Sanity of VoigtAstroP example with instrumental resolution
"""
from PyAstronomy import modelSuite as ms
import numpy as np
import matplotlib.pylab as plt
# Obtain an object of type VoigtAstroP ...
v = ms.VoigtAstroP()
# ... and set some parameters
v["b"] = 40.7
v["f"] = 0.5
v["w0"] = 1214.0
# Damping constant [cm]
v["gamma"] = 2e-9
# Generate wavelength axis ...
wvl = np.linspace(1212.,1216.,200)
# ... and evaluate model
m = v.evaluate(wvl)
# Add (Gaussian) instrumental broadening with resolution 5000
v["R"] = 5000
mr = v.evaluate(wvl)
# Plot result
# plt.plot(wvl, m, 'b.-', label="R = inf")
# plt.plot(wvl, mr, 'r.-', label="R = 5000")
# plt.legend()
# plt.show()
def sanity_LyATransmission(self):
"""
Checking sanity of LyATransmission example
"""
from PyAstronomy import modelSuite as ms
import numpy as np
import matplotlib.pylab as plt
la = ms.LyaTransmission()
# Set some parameters
la["N"] = 5e17
la["b"] = 12.2
la["Dfrac"] = 1.9e-5
# Set up wavelength axis ...
wvl = np.linspace(1214., 1217., 1000)
# ... and evaluate model
m = la.evaluate(wvl)
# Plot the result
plt.plot(wvl, m, 'b.-')
# plt.show()
def sanity_RotBroadProfileExample(self):
"""
Example of rotational broadening.
"""
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy import modelSuite as ms
# Get an instance of the model ...
x = ms.RotBroadProfile()
# ... and define some starting value
x["xmax"] = 60.0
x["A"] = 1.0
x["eps"] = 0.8
x["off"] = 0.0
# Define a radial velocity axis
vv = np.linspace(-90., 90., 200)
# Construct some "data" and ...
data = x.evaluate(vv)
# ... add noise
data += np.random.normal(0.0, 1e-3, data.size)
# Fit the model using A, xmax, and eps as free
# parameters ...
x.thaw(["A", "xmax", "eps"])
x.fit(vv, data)
# ... and show the resulting parameter values.
x.parameterSummary()
# Plot the data and the model
plt.plot(vv, data, 'bp')
plt.plot(vv, x.model, 'r--')
# plt.show()
def sanity_RotBroadProfile(self):
"""
Checking RotBroadProfile
"""
import numpy as np
from PyAstronomy import modelSuite as ms
import scipy.integrate as sci
# Get an instance of the model ...
x = ms.RotBroadProfile()
vv = np.linspace(-90., 90., 200)
for i in range(10):
# ... and define some starting value
x["xmax"] = np.random.random() * 50.0 + 30.0
x["A"] = np.random.random() * 10.0 + 1.0
x["eps"] = np.random.random()
d = x.evaluate(vv)
a = sci.trapz(d, vv)
self.assertAlmostEqual(x["A"], a, delta=1.0 / 200., msg="Incorrect profile normalization (" +
"%g vs %g)" % (x["A"], a))
x["eps"] = 0.0
x["xmax"] = 50.0
x["A"] = 1.0
vv = np.linspace(-x["xmax"], x["xmax"], 100)
d = x.evaluate(vv)
y = d - 2.0 / (np.pi * x["xmax"]) * np.sqrt(1.0 - (vv / x["xmax"])**2)
self.assertFalse(np.any(np.abs(y) > 1e-6),
msg="Incorrect profile for eps=0.0")
def sanity_KeplerRVModel_example(self):
"""
Checking sanity of KeplerRVModel example
"""
import numpy as np
import matplotlib.pylab as plt
from PyAstronomy.modelSuite import KeplerRVModel
from PyAstronomy import funcFit as fuf
# Generate artificial data ...
jd = np.arange(100)
rv = 1.5 * np.sin(jd / 37.6 * 2.*np.pi)
# ... with some error
rverr = 0.5
rv += np.random.normal(0, rverr, len(jd))
rverr = np.ones(len(rv)) * 0.5
# Get RV model with one planet (mp) and a potential constant offset
# in RV (deg = 0)
krvm = KeplerRVModel(mp=1, deg=0)
# To obtain some useful estimate of the minimum mass of the companion,
# we must specify the mass of the star (in terms of solar masses)
krvm["mstar"] = 0.5
# Let us have a look at the available parameters.
# Note that not all are meant for fitting in this model (MA and a)!
# There is also not much use in fitting 'mstar'. It may, however, be
# used in combination with a prior to take into account its uncertainty in
# the estimates.
krvm.parameterSummary(sorting="ps")
# We specify some guess parameters.
krvm["per1"] = 37.0
krvm["K1"] = 1.0
krvm["e1"] = 0.0
krvm["tau1"] = 17.0
krvm["w1"] = 180.
# Let us fit all of these but period ...
krvm.thaw(["K1", "tau1", "w1", "e1", "c0"])
# ... and now also the period
krvm.thaw(["per1"])
krvm.fit(jd, rv, yerr=rverr)
# and then get the best-fit model
kmo = krvm.evaluate(jd)
# What about chi-square and RMS?
chi = np.sum( (rv - krvm.model)**2 / rverr**2 )
# Reduced chi-square
rchi = chi / (len(rv) - len(krvm.freeParameters()))
print("chi-square and reduced chi-square: %6.3f, %6.3f" % (chi, rchi))
rms = np.std(rv - krvm.model)
print("RMS: ", rms)
plt.title("RV data (blue) and model (red)")
plt.errorbar(jd, rv, yerr=rverr, fmt='b+')
plt.plot(jd, krvm.model, 'r-')
#=======================================================================
# plt.show()
#=======================================================================
# Now let us do some posterior-based error analysis using MCMC
        # Say, we want 100 burn-in iterations and, thereafter,
# 50 further iterations (per walker).
sampleArgs = {"iters":50, "burn":100}
# Specify a bounded uniform prior on the eccentricity. Note that restrictions are not
        # automatically converted into priors (they may not be uniform). A further prior,
        # e.g., on per1, may be required to prevent wandering into 'forbidden territory'.
priors = {"e1":fuf.FuFPrior("limuniform", upper=1, lower=0)}
# Start the sampling (ps could be used to continue the sampling)
ps = krvm.fitEMCEE(jd, rv, yerr=rverr, sampleArgs=sampleArgs, scales={"e":0.05}, dbfile="chain1.emcee", \
priors=priors)
# Have a look at the posterior
ta = fuf.TraceAnalysis("chain1.emcee")
# What about the deviance (-2 log(Likelihood))
ta.plotTraceHist("deviance")
#=======================================================================
# ta.show()
#=======================================================================
# Expectation value and highest probability density interval for eccentricity
ta.plotTraceHist("e1")
print("Expectation value for eccentricity: ", ta.mean("e1"))
print("90% HPD for eccentricity: ", ta.hpd("e1", cred=0.9))
#=======================================================================
# ta.show()
#=======================================================================
class VoigtAstroPSanity(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def sanity_normalization(self):
"""
Normalization of AstroVoigtP
"""
import numpy as np
from PyAstronomy import modelSuite as ms
from PyAstronomy import pyaC
v = ms.VoigtAstroP()
# Hypothetical wvls [A]
w0s = [1000., 5000, 10000.]
# (pi e**2)/(m_e c)
const = (4.803e-10)**2*np.pi / (9.11e-28*29979245800.0)
fs = [1, 100]
for f in fs:
for w0 in w0s:
v["w0"] = w0
v["b"] = 100.
v["gamma"] = 1e-10
v["f"] = f
dw = 20.0
w = np.linspace(w0-dw, w0+dw, 1000)
m = v.evaluate(w)
i = pyaC.ibtrapz(w/1e8, m*29979245800.0/(w/1e8)**2 , (w0-dw)/1e8, (w0+dw)/1e8)
self.assertAlmostEqual(i/const, f, delta=1e-2, msg="Normalization of AstroVoigtP is broken: f, w0, i: % g, % g, % g" % (f, w0, i))
def sanity_instrumentalResolution(self):
"""
Checking integrity of instrumental resolution in VoigtAstroP
"""
import numpy as np
from PyAstronomy import modelSuite as ms
from PyAstronomy import pyasl
v = ms.VoigtAstroP()
w0 = 10830
for R in [2500, 5000, 80000]:
v["w0"] = w0
v["b"] = 10.
v["gamma"] = 1e-8
v["f"] = 100.0
v["R"] = 0
dw = 40.0
w = np.linspace(w0-dw, w0+dw, 4000)
m = v.evaluate(w)
v["R"] = R
m2 = v.evaluate(w)
fb = pyasl.instrBroadGaussFast(w, m, R, edgeHandling=None, fullout=False, maxsig=None)
d = 1000
self.assertAlmostEqual(np.max(np.abs(fb[d:-d] - m2[d:-d])/m2[d:-d]), 0.0, delta=1e-10,
msg="VoigtAstroP instrumental broadening broken for R = " + str(R))
| 33.542857
| 146
| 0.504685
|
a472281a82c1649d6149bfd1b15761cc70ac650d
| 376
|
py
|
Python
|
apps/general/urls.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | null | null | null |
apps/general/urls.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | null | null | null |
apps/general/urls.py
|
LvanArkel/sbzwebsite
|
a26efbb050585312c53010f14f86c23616a8071f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', view=views.index, name='index'),
url(r'^construction/', view=views.construction, name='construction'),
url(r'^contact/', view=views.contact, name='contact'),
url(r'^login$', view=views.login, name='login'),
url(r'^logout$', view=views.logout, name='logout'),
]
| 28.923077
| 74
| 0.632979
|
2239b3cd12e697c73f5aaf5dcc5e2c8017275b41
| 58
|
py
|
Python
|
sqstaskmaster/__init__.py
|
upserve/sqstaskmaster
|
284e67f080411731f9642717ff28908e81988cba
|
[
"MIT"
] | null | null | null |
sqstaskmaster/__init__.py
|
upserve/sqstaskmaster
|
284e67f080411731f9642717ff28908e81988cba
|
[
"MIT"
] | 1
|
2019-10-16T18:27:58.000Z
|
2019-10-16T18:27:58.000Z
|
sqstaskmaster/__init__.py
|
upserve/sqstaskmaster
|
284e67f080411731f9642717ff28908e81988cba
|
[
"MIT"
] | null | null | null |
# Do not import dependencies here or setup.py will break.
| 29
| 57
| 0.775862
|
409d5309a09d1e8d198dd24fecd39c551bef374d
| 1,087
|
py
|
Python
|
pyspedas/mms/particles/mms_pgs_make_phi_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 75
|
2019-02-22T12:59:33.000Z
|
2022-02-26T15:33:20.000Z
|
pyspedas/mms/particles/mms_pgs_make_phi_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 40
|
2019-07-02T07:46:34.000Z
|
2022-02-23T21:48:50.000Z
|
pyspedas/mms/particles/mms_pgs_make_phi_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 43
|
2019-02-22T13:03:41.000Z
|
2022-01-24T19:26:59.000Z
|
import numpy as np
# use nansum from bottleneck if it's installed, otherwise use the numpy one
try:
import bottleneck as bn
nansum = bn.nansum
except ImportError:
nansum = np.nansum
def mms_pgs_make_phi_spec(data_in, resolution=32):
data = data_in.copy()
n_phi = resolution
# zero inactive bins to ensure areas with no data are represented as NaN
zero_bins = np.argwhere(data['bins'] == 0)
if zero_bins.size != 0:
for item in zero_bins:
data['data'][item[0], item[1]] = 0.0
ave = np.zeros(n_phi)
bin_size = 360.0/n_phi
outbins = np.arange(0, 361, bin_size)
phi_flat = data['phi'].flatten()
data_flat = data['data'].flatten()
bins_flat = data['bins'].flatten()
for bin_idx in range(0, len(outbins)-1):
this_bin = np.argwhere((phi_flat >= outbins[bin_idx]) & (phi_flat < outbins[bin_idx+1]))
if len(this_bin) > 0:
ave[bin_idx] += nansum(data_flat[this_bin])/nansum(bins_flat[this_bin])
y = outbins[0:n_phi]+0.5*(outbins[1::]-outbins[0:n_phi])
return (y, ave)
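# With the default resolution=32 the bin_size is 360/32 = 11.25 degrees, outbins runs 0, 11.25,
# ..., 360, and the returned y array holds the 32 bin centres (5.625, 16.875, ..., 354.375);
# ave is the data averaged over the contributing instrument bins that fall inside each phi slot.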
| 28.605263
| 96
| 0.638454
|
68a870178712676a1c7a9dce92b035a665d339dc
| 619
|
py
|
Python
|
unknowntags/forms.py
|
rickvanderzwet/makerspaceleiden-crm
|
a36d5073d4c49bbf46580ca9bbda5d80593d2d84
|
[
"Apache-2.0"
] | null | null | null |
unknowntags/forms.py
|
rickvanderzwet/makerspaceleiden-crm
|
a36d5073d4c49bbf46580ca9bbda5d80593d2d84
|
[
"Apache-2.0"
] | null | null | null |
unknowntags/forms.py
|
rickvanderzwet/makerspaceleiden-crm
|
a36d5073d4c49bbf46580ca9bbda5d80593d2d84
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from django.conf import settings
from members.models import User
from .models import Unknowntag
class SelectUserForm(forms.Form):
user = forms.ModelChoiceField(queryset=User.objects.all())
activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet.')
class SelectTagForm(forms.Form):
tag = forms.ModelChoiceField(queryset=Unknowntag.objects.all())
activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet.')
| 41.266667
| 130
| 0.781906
|
92f46091790564c2d855b876970e36c1e5908414
| 6,315
|
py
|
Python
|
src/main.py
|
paradiseHIT/text-embeddings
|
97c522110ae1a095a2f294a2a484d7bb51567e0e
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
paradiseHIT/text-embeddings
|
97c522110ae1a095a2f294a2a484d7bb51567e0e
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
paradiseHIT/text-embeddings
|
97c522110ae1a095a2f294a2a484d7bb51567e0e
|
[
"Apache-2.0"
] | null | null | null |
import json
import time
import ssl
import sys, getopt
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
from flask import Flask, redirect, url_for
app = Flask(__name__)
# Use tensorflow 1 behavior to match the Universal Sentence Encoder
# examples (https://tfhub.dev/google/universal-sentence-encoder/2).
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
##### INDEXING #####
def index_data():
print("Creating the 'posts' index.")
client.indices.delete(index=INDEX_NAME, ignore=[404])
with open(INDEX_FILE) as index_file:
source = index_file.read().strip()
client.indices.create(index=INDEX_NAME, body=source)
docs = []
count = 0
with open(DATA_FILE) as data_file:
for line in data_file:
line = line.strip()
doc = json.loads(line)
if doc["type"] != "question":
continue
docs.append(doc)
count += 1
if count % BATCH_SIZE == 0:
index_batch(docs)
docs = []
print("Indexed {} documents.".format(count))
if docs:
index_batch(docs)
print("Indexed {} documents.".format(count))
client.indices.refresh(index=INDEX_NAME)
print("Done indexing.")
def index_batch(docs):
titles = [doc["title"] for doc in docs]
title_vectors = embed_text(titles)
requests = []
for i, doc in enumerate(docs):
request = doc
request["_op_type"] = "index"
request["_index"] = INDEX_NAME
request["title_vector"] = title_vectors[i]
requests.append(request)
bulk(client, requests)
##### SEARCHING #####
def run_query_loop():
while True:
try:
handle_query()
except KeyboardInterrupt:
return
@app.route('/query/<query>')
def handle_query2(query):
embedding_start = time.time()
query_vector = embed_text([query])[0]
embedding_time = time.time() - embedding_start
script_query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'title_vector') + 1.0",
"params": {"query_vector": query_vector}
}
}
}
search_start = time.time()
body={
"size": SEARCH_SIZE,
"query": script_query,
"_source": {"includes": ["title"]}
}
response = client.search(
index=INDEX_NAME,
body=body
)
search_time = time.time() - search_start
print()
print("{} total hits.".format(response["hits"]["total"]["value"]))
print("embedding time: {:.2f} ms".format(embedding_time * 1000))
print("search time: {:.2f} ms".format(search_time * 1000))
ret_str=""
for hit in response["hits"]["hits"]:
print("id: {}, score: {}".format(hit["_id"], hit["_score"]))
print(hit["_source"])
ret_str = ret_str + " " + hit["_source"]["title"]
print()
return ret_str
def handle_query():
query = input("Enter query: ")
embedding_start = time.time()
query_vector = embed_text([query])[0]
embedding_time = time.time() - embedding_start
script_query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'title_vector') + 1.0",
"params": {"query_vector": query_vector}
}
}
}
search_start = time.time()
body={
"size": SEARCH_SIZE,
"query": script_query,
"_source": {"includes": ["title"]}
}
response = client.search(
index=INDEX_NAME,
body=body
)
search_time = time.time() - search_start
print()
print("{} total hits.".format(response["hits"]["total"]["value"]))
print("embedding time: {:.2f} ms".format(embedding_time * 1000))
print("search time: {:.2f} ms".format(search_time * 1000))
for hit in response["hits"]["hits"]:
print("id: {}, score: {}".format(hit["_id"], hit["_score"]))
print(hit["_source"])
print()
##### EMBEDDING #####
def embed_text(text):
vectors = session.run(embeddings, feed_dict={text_ph: text})
return [vector.tolist() for vector in vectors]
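# embed_text() returns plain Python lists so the vectors can be serialized into
# bulk requests and script_score params; for universal-sentence-encoder/2 each
# vector is typically 512-dimensional (assumption based on the model referenced above).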
##### MAIN SCRIPT #####
if __name__ == '__main__':
INDEX_NAME = "posts"
INDEX_FILE = "data/posts/index.json"
DATA_FILE = "data/posts/posts.json"
BATCH_SIZE = 1000
SEARCH_SIZE = 5
GPU_LIMIT = 0.5
model_dir = None
ca_cert_file = None
try:
opts, args = getopt.getopt(sys.argv[1:],"hm:c:",["model_dir=","ca_cert_file="])
except getopt.GetoptError:
print('main.py -m <model_dir> -c <ca_cert_file>')
sys.exit(-1)
for opt, arg in opts:
if opt == '-h':
print('main.py -m <model_dir> -c <ca_cert_file>')
sys.exit(0)
elif opt in ("-m", "--model_dir"):
model_dir = arg
elif opt in ("-c", "--ca_cert_file"):
ca_cert_file = arg
    if model_dir is None:
        print("model_dir is None")
        sys.exit(-1)
    if ca_cert_file is None:
        print("ca_cert_file is None")
        sys.exit(-1)
print('model_dir=%s' % str(model_dir))
print('ca_cert_file=%s' % str(ca_cert_file))
#print("Downloading pre-trained embeddings from tensorflow hub...")
#embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")
embed = hub.Module(model_dir)
text_ph = tf.placeholder(tf.string)
embeddings = embed(text_ph)
print("Creating tensorflow session...")
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = GPU_LIMIT
session = tf.Session(config=config)
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
print("Done.")
ES=["localhost:9200"]
context = ssl._create_unverified_context()
client = Elasticsearch(ES,
ca_certs=ca_cert_file,
scheme="https",
ssl_context=context,
http_auth=('elastic', 'zLD*uPqtDNoybExIkEgt'))
#index_data()
#run_query_loop()
app.run(host="0.0.0.0", port=8000)
print("Closing tensorflow session...")
session.close()
print("Done.")
| 28.574661
| 88
| 0.589865
|
e438eddc7c439fdfba2af32b310eecbb6a27fad5
| 576
|
py
|
Python
|
module3/RC 2.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
module3/RC 2.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
module3/RC 2.py
|
aspadm/labworks
|
8b04a40656f0791b191e7e6a980c10afc77cd041
|
[
"MIT"
] | null | null | null |
# Кириллов, ИУ7-12, variant
# In an integer one-dimensional array A(N) (N<=100), move the odd
# elements to the beginning, preserving their order.
# Print the original and the resulting arrays; do not use additional arrays.
print('Enter an integer array on one line:')
A = list(map(int,input().split()))
N = len(A)
k = 0
print('\nOriginal array:')
for i in range(N):
print(A[i],end=' ')
for i in range(N):
if A[i]&1:
A = A[:k]+[A[i]]+A[k:i]+A[i+1:]
k += 1
print('\nResulting array:')
for i in range(N):
print(A[i],end=' ')
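# Worked example (hypothetical input): for "1 2 3 4 5" the program prints the
# original array "1 2 3 4 5" and then "1 3 5 2 4" - the odd elements are moved
# to the front with their relative order preserved.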
| 23.04
| 73
| 0.605903
|
9e5f29727795824f0ab387b3186faf282afe622e
| 204
|
py
|
Python
|
video/core/admin.py
|
eltonjncorreia/youtube-deeper
|
2348345a19154968a25dcd81b9780a400923419d
|
[
"MIT"
] | null | null | null |
video/core/admin.py
|
eltonjncorreia/youtube-deeper
|
2348345a19154968a25dcd81b9780a400923419d
|
[
"MIT"
] | null | null | null |
video/core/admin.py
|
eltonjncorreia/youtube-deeper
|
2348345a19154968a25dcd81b9780a400923419d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from video.core.models import Video, Thumb, Theme, Comment
admin.site.register(Video)
admin.site.register(Thumb)
admin.site.register(Theme)
admin.site.register(Comment)
| 22.666667
| 58
| 0.808824
|
46abbbaa422a491a2d8c67288b6cbba45531bc64
| 2,724
|
py
|
Python
|
592_fractionAddSubtract.py
|
stuti-rastogi/leetcode-python-solutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | 4
|
2018-07-24T08:36:42.000Z
|
2019-08-25T17:48:47.000Z
|
592_fractionAddSubtract.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
592_fractionAddSubtract.py
|
stuti-rastogi/leetcodesolutions
|
73593fe642a06a83cde974ba5e6de3a7b396ec84
|
[
"MIT"
] | null | null | null |
import math
class Fraction:
def __init__(self, num, denom, isNegative):
self.numerator = num
self.denominator = denom
self.negative = isNegative
def convertToString(self):
'''
Convert a fraction object to string format like -1/3 or 3/1
'''
result = []
if self.negative:
result.append('-')
result.append(str(self.numerator))
result.append('/')
result.append(str(self.denominator))
return "".join(result)
class Solution:
def fractionAddition(self, expression: str) -> str:
expr_it = 0
expr_len = len(expression)
# will store all fraction objects
fractions = []
while (expr_it < expr_len):
# store the sign for this fraction
if expression[expr_it] == '-':
isNegative = True
expr_it += 1
elif expression[expr_it] == '+':
isNegative = False
expr_it += 1
else:
# in the beginning we can have nothing at the beginning of this fraction
isNegative = False
numerator = 0
while (expression[expr_it] != '/'):
numerator = (numerator * 10) + int(expression[expr_it])
expr_it += 1
denominator = 0
# to skip over the '/'
expr_it += 1
while (expr_it < expr_len and expression[expr_it] != '+' and expression[expr_it] != '-'):
denominator = (denominator * 10) + int(expression[expr_it])
expr_it += 1
fractions.append(Fraction(numerator, denominator, isNegative))
commonDenominator = 1
# product of all denominators
for fraction in fractions:
commonDenominator *= fraction.denominator
commonNumerator = 0
for fraction in fractions:
scaleFactor = commonDenominator // fraction.denominator
if fraction.negative:
sign = -1
else:
sign = 1
# add/subtract the numerator scaled by the product of all other denominators
commonNumerator += (scaleFactor * fraction.numerator * sign)
if commonNumerator < 0:
negativeAns = True
else:
negativeAns = False
# reduce fraction to simplest form
scaleDown = math.gcd(abs(commonNumerator), commonDenominator)
resultNumerator = abs(commonNumerator) // scaleDown
resultDenominator = commonDenominator // scaleDown
resultFraction = Fraction(resultNumerator, resultDenominator, negativeAns)
return resultFraction.convertToString()
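# Minimal local usage sketch (assumption: running this file directly rather than
# inside a judge harness).
if __name__ == "__main__":
    print(Solution().fractionAddition("-1/2+1/2+1/3"))  # expected output: "1/3"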
| 34.481013
| 101
| 0.564611
|
7d082876fd320b7c237ae49ad6a891806323e559
| 3,241
|
py
|
Python
|
ci/cleanup-geoserver-layers.py
|
venicegeo/dg-pz-access
|
314cf2461899ac0aae82d1eabbbc191c07c2d326
|
[
"Apache-2.0"
] | null | null | null |
ci/cleanup-geoserver-layers.py
|
venicegeo/dg-pz-access
|
314cf2461899ac0aae82d1eabbbc191c07c2d326
|
[
"Apache-2.0"
] | 4
|
2016-02-24T18:36:00.000Z
|
2017-11-30T16:48:00.000Z
|
scripts/cleanup-geoserver-layers.py
|
venicegeo/piazza
|
fff367677b389ce0d4e62a2414cc991e8796a397
|
[
"Apache-2.0"
] | 1
|
2020-10-01T14:25:47.000Z
|
2020-10-01T14:25:47.000Z
|
#!/usr/bin/env python2
# Copyright 2016, RadiantBlue Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
from requests.auth import HTTPBasicAuth
import json
import re
import sys
def getBadLayer(geoserverUri):
# WMS Request to Root GeoServer Layer
uri = geoserverUri + '/geoserver/wms?request=GetCapabilities&service=wms&version=1.1.1'
response = requests.get(uri)
if 'Error occurred trying to write out metadata for layer:' in response.text:
# A Bad Layer is found. Return the name.
guid = re.findall("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", response.text)
return guid[0]
return None
def deleteLayer(geoserverUri, username, password, layer):
# Deletes a bad GeoServer Layer/Data Store
uri = geoserverUri + '/geoserver/rest/layers/' + layer
response = requests.delete(uri, auth=HTTPBasicAuth(username, password))
print 'Culling ' + layer + ', response was ' + str(response.status_code)
if response.status_code == 500:
if 'Unable to delete layer referenced by layer group' in response.text:
# Delete the Layer Group
guid = re.findall("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", response.text)
deleteUri = geoserverUri + '/geoserver/rest/workspaces/piazza/layergroups/' + guid[0] + '.json'
response = requests.delete(deleteUri, auth=HTTPBasicAuth(username, password))
print 'Culled Layer Group ' + guid[0] + ', response was ' + str(response.status_code)
if response.status_code == 500:
print response.text
# Try to Delete the Layer again
response = requests.delete(uri, auth=HTTPBasicAuth(username, password))
print 'Retry culling ' + layer + ', response was ' + str(response.status_code)
if response.status_code == 500:
print response.text
print 'Could not delete layer. Exiting.'
sys.exit(1)
else:
print 'Could not delete layer. Exiting.'
sys.exit(1)
def main():
# Pull in required variables from command line
parser = argparse.ArgumentParser(
description='Cull corrupted layers from GeoServer.')
parser.add_argument('-g', help='GeoServer URI')
parser.add_argument('-u', help='GeoServer UserName')
parser.add_argument('-p', help='GeoServer Password')
args = parser.parse_args()
geoserverUri = args.g
username = args.u
password = args.p
# Check for Bad Layers
print 'Begin culling of Bad Layers'
badLayer = getBadLayer(geoserverUri)
while (badLayer is not None):
# Delete the Layer
deleteLayer(geoserverUri, username, password, badLayer)
# Check Again
badLayer = getBadLayer(geoserverUri)
# No more Bad Layers, nothing to do
print 'Done culling Bad Layers'
if __name__ == "__main__":
main()
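# Example invocation (all values are placeholders):
#   python2 cleanup-geoserver-layers.py -g https://geoserver.example.com -u admin -p secret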
| 39.048193
| 99
| 0.712743
|
b39fef0fee3372e229be27802fe92bd850e75dae
| 6,725
|
py
|
Python
|
ckan/views/__init__.py
|
smartcolumbus-ide/scc_ide_ckan
|
f607d0a2f3110fe0d5e2627b2d414112d4cb3863
|
[
"Apache-2.0"
] | 3
|
2020-03-07T02:47:04.000Z
|
2020-09-11T05:40:41.000Z
|
ckan/views/__init__.py
|
smartcolumbus-ide/scc_ide_ckan
|
f607d0a2f3110fe0d5e2627b2d414112d4cb3863
|
[
"Apache-2.0"
] | 4
|
2018-07-25T07:09:35.000Z
|
2019-01-23T07:21:23.000Z
|
ckan/views/__init__.py
|
Pilchards/ckan
|
729480f82345df1e2d753c94c5e0541a2aff9bd8
|
[
"Apache-2.0"
] | 1
|
2017-11-03T14:55:25.000Z
|
2017-11-03T14:55:25.000Z
|
# encoding: utf-8
from paste.deploy.converters import asbool
import ckan.model as model
from ckan.common import g, request, config, session
from ckan.lib.helpers import redirect_to as redirect
import ckan.plugins as p
import logging
log = logging.getLogger(__name__)
APIKEY_HEADER_NAME_KEY = u'apikey_header_name'
APIKEY_HEADER_NAME_DEFAULT = u'X-CKAN-API-Key'
def check_session_cookie(response):
u'''
The cookies for auth (auth_tkt) and session (ckan) are separate. This
checks whether a user is logged in, and determines the validity of the
session cookie, removing it if necessary.
'''
for cookie in request.cookies:
# Remove the ckan session cookie if logged out.
if cookie == u'ckan' and not getattr(g, u'user', None):
# Check session for valid data (including flash messages)
is_valid_cookie_data = False
for key, value in session.items():
if not key.startswith(u'_') and value:
is_valid_cookie_data = True
break
if not is_valid_cookie_data:
if session.id:
log.debug(u'No valid session data - deleting session')
log.debug(u'Session: %r', session.items())
session.delete()
else:
log.debug(u'No session id - deleting session cookie')
response.delete_cookie(cookie)
# Remove auth_tkt repoze.who cookie if user not logged in.
elif cookie == u'auth_tkt' and not session.id:
response.delete_cookie(cookie)
return response
def set_cors_headers_for_response(response):
u'''
Set up Access Control Allow headers if either origin_allow_all is True, or
the request Origin is in the origin_whitelist.
'''
if config.get(u'ckan.cors.origin_allow_all') \
and request.headers.get(u'Origin'):
cors_origin_allowed = None
if asbool(config.get(u'ckan.cors.origin_allow_all')):
cors_origin_allowed = u'*'
elif config.get(u'ckan.cors.origin_whitelist') and \
request.headers.get(u'Origin') \
in config[u'ckan.cors.origin_whitelist'].split(u' '):
# set var to the origin to allow it.
cors_origin_allowed = request.headers.get(u'Origin')
if cors_origin_allowed is not None:
response.headers[u'Access-Control-Allow-Origin'] = \
cors_origin_allowed
response.headers[u'Access-Control-Allow-Methods'] = \
u'POST, PUT, GET, DELETE, OPTIONS'
response.headers[u'Access-Control-Allow-Headers'] = \
u'X-CKAN-API-KEY, Authorization, Content-Type'
return response
def identify_user():
u'''Try to identify the user
If the user is identified then:
g.user = user name (unicode)
g.userobj = user object
g.author = user name
otherwise:
g.user = None
g.userobj = None
g.author = user's IP address (unicode)
Note: Remember, when running under Pylons, `g` is the Pylons `c` object
'''
# see if it was proxied first
g.remote_addr = request.environ.get(u'HTTP_X_FORWARDED_FOR', u'')
if not g.remote_addr:
g.remote_addr = request.environ.get(u'REMOTE_ADDR',
u'Unknown IP Address')
    # Authentication plugins get a chance to run here; break as soon as a user
# is identified.
authenticators = p.PluginImplementations(p.IAuthenticator)
if authenticators:
for item in authenticators:
item.identify()
if g.user:
break
# We haven't identified the user so try the default methods
if not getattr(g, u'user', None):
_identify_user_default()
# If we have a user but not the userobj let's get the userobj. This means
# that IAuthenticator extensions do not need to access the user model
# directly.
if g.user and not getattr(g, u'userobj', None):
g.userobj = model.User.by_name(g.user)
# general settings
if g.user:
g.author = g.user
else:
g.author = g.remote_addr
g.author = unicode(g.author)
def _identify_user_default():
u'''
Identifies the user using two methods:
a) If they logged into the web interface then repoze.who will
set REMOTE_USER.
b) For API calls they may set a header with an API key.
'''
# environ['REMOTE_USER'] is set by repoze.who if it authenticates a
# user's cookie. But repoze.who doesn't check the user (still) exists
# in our database - we need to do that here. (Another way would be
    # with a userid_checker, but that would mean another db access.
# See: http://docs.repoze.org/who/1.0/narr.html#module-repoze.who\
# .plugins.sql )
g.user = request.environ.get(u'REMOTE_USER', u'')
if g.user:
g.user = g.user.decode(u'utf8')
g.userobj = model.User.by_name(g.user)
if g.userobj is None or not g.userobj.is_active():
# This occurs when a user that was still logged in is deleted, or
# when you are logged in, clean db and then restart (or when you
# change your username). There is no user object, so even though
# repoze thinks you are logged in and your cookie has
# ckan_display_name, we need to force user to logout and login
# again to get the User object.
ev = request.environ
if u'repoze.who.plugins' in ev:
pth = getattr(ev[u'repoze.who.plugins'][u'friendlyform'],
u'logout_handler_path')
redirect(pth)
else:
g.userobj = _get_user_for_apikey()
if g.userobj is not None:
g.user = g.userobj.name
def _get_user_for_apikey():
apikey_header_name = config.get(APIKEY_HEADER_NAME_KEY,
APIKEY_HEADER_NAME_DEFAULT)
apikey = request.headers.get(apikey_header_name, u'')
if not apikey:
apikey = request.environ.get(apikey_header_name, u'')
if not apikey:
# For misunderstanding old documentation (now fixed).
apikey = request.environ.get(u'HTTP_AUTHORIZATION', u'')
if not apikey:
apikey = request.environ.get(u'Authorization', u'')
# Forget HTTP Auth credentials (they have spaces).
if u' ' in apikey:
apikey = u''
if not apikey:
return None
apikey = apikey.decode(u'utf8', u'ignore')
log.debug(u'Received API Key: %s' % apikey)
query = model.Session.query(model.User)
user = query.filter_by(apikey=apikey).first()
return user
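# Example of supplying the key with the default header name configured above
# (URL and key are placeholders):
#   curl -H "X-CKAN-API-Key: <apikey>" https://ckan.example.org/api/3/action/package_list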
| 37.154696
| 78
| 0.624833
|
4f4dbd201c0115cb165d78bcf485d57eada95b90
| 691
|
py
|
Python
|
Unit test/test_detect_img_TF.py
|
khaledsabry97/Argus
|
c794f6e46ec529a836db127dfdb33b3161cf79ee
|
[
"MIT"
] | 9
|
2021-01-09T17:04:18.000Z
|
2022-03-24T11:26:00.000Z
|
Unit test/test_detect_img_TF.py
|
khaledsabry97/Argus
|
c794f6e46ec529a836db127dfdb33b3161cf79ee
|
[
"MIT"
] | 4
|
2021-03-21T21:40:06.000Z
|
2022-03-12T00:53:55.000Z
|
Unit test/test_detect_img_TF.py
|
khaledsabry97/Argus
|
c794f6e46ec529a836db127dfdb33b3161cf79ee
|
[
"MIT"
] | 3
|
2021-03-13T07:39:19.000Z
|
2022-01-28T23:00:51.000Z
|
from Car_Detection_TF.yolo import YOLO
from PIL import Image
import cv2
"""
Unit test for detect_img using TF.
args: cv image
return: list of boxes bounding detected vehicles
"""
def test_detect_img_invalid_input():
"""
Verify type consistency of the function
"""
yolo = YOLO()
img, bboxes = yolo.detect_image(None)
    assert img is None and bboxes is None
def test_detect_img_valid_input():
"""
Verify the method works as expected
"""
yolo = YOLO()
frame = cv2.imread('Car_Detection/test.png')
image = Image.fromarray(frame)
img, bboxes = yolo.detect_image(image)
assert len(bboxes) != 0 and len(bboxes) >= 2
| 20.939394
| 52
| 0.658466
|
49f583e7f501629aef667a9cbd9c10496acbdfc5
| 4,372
|
py
|
Python
|
tests/test_candle_service.py
|
laughingwithu/jesse
|
c21adf59074ad62e4aa775261b4ad86c542ec4d5
|
[
"MIT"
] | 10
|
2020-05-24T17:31:11.000Z
|
2022-03-18T00:31:14.000Z
|
tests/test_candle_service.py
|
laughingwithu/jesse
|
c21adf59074ad62e4aa775261b4ad86c542ec4d5
|
[
"MIT"
] | null | null | null |
tests/test_candle_service.py
|
laughingwithu/jesse
|
c21adf59074ad62e4aa775261b4ad86c542ec4d5
|
[
"MIT"
] | 4
|
2020-07-08T19:24:45.000Z
|
2022-01-26T12:53:04.000Z
|
import numpy as np
from jesse.factories import fake_range_candle
from jesse.services.candle import *
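# Candle layout assumed throughout these tests (inferred from the assertions below):
# [timestamp, open, close, high, low, volume]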
def test_is_bullish():
c = np.array([1543387200000, 190, 200, 220, 180, 195])
assert is_bullish(c)
def test_is_bearish():
c = np.array([1543387200000, 200, 190, 220, 180, 195])
assert is_bearish(c)
def test_generate_candle_from_one_minutes():
candles = fake_range_candle(5)
five_minutes_candle = generate_candle_from_one_minutes('5m', candles)
assert five_minutes_candle[0] == candles[0][0]
assert five_minutes_candle[1] == candles[0][1]
assert five_minutes_candle[2] == candles[-1][2]
assert five_minutes_candle[3] == candles[:, 3].max()
assert five_minutes_candle[4] == candles[:, 4].min()
assert five_minutes_candle[5] == candles[:, 5].sum()
def test_candle_includes_price():
c = np.array([1543387200000, 10, 20, 25, 5, 195])
assert candle_includes_price(c, 5)
assert candle_includes_price(c, 15)
assert candle_includes_price(c, 25)
assert not candle_includes_price(c, 4)
assert not candle_includes_price(c, 26)
def test_split_candle():
"""
    These values were worked out by hand on paper; you need to reproduce the drawings for them to make sense.
"""
bull = np.array([1111, 10, 20, 25, 5, 2222])
bear = np.array([1111, 20, 10, 25, 5, 2222])
# bullish candle, low < price < open
np.testing.assert_equal(
split_candle(bull, 7),
(
np.array([1111, 10, 7, 10, 7, 2222]),
np.array([1111, 7, 20, 25, 5, 2222]),
)
)
# bearish candle, open < price < high
np.testing.assert_equal(
split_candle(bear, 23),
(
np.array([1111, 20, 23, 23, 20, 2222]),
np.array([1111, 23, 10, 25, 5, 2222]),
)
)
# bullish candle, price == open
np.testing.assert_equal(
split_candle(bull, bull[1]),
(bull, bull)
)
# bearish candle, price == open
np.testing.assert_equal(
split_candle(bear, bear[1]),
(bear, bear)
)
# bearish candle, low < price < close
np.testing.assert_equal(
split_candle(bear, 7),
(
np.array([1111, 20, 7, 25, 7, 2222]),
np.array([1111, 7, 10, 10, 5, 2222]),
)
)
# bullish candle, close < price < high
np.testing.assert_equal(
split_candle(bull, 23),
(
np.array([1111, 10, 23, 23, 5, 2222]),
np.array([1111, 23, 20, 25, 20, 2222]),
)
)
# bearish candle, price == close
np.testing.assert_equal(
split_candle(bear, 10),
(
np.array([1111, 20, 10, 25, 10, 2222]),
np.array([1111, 10, 10, 10, 5, 2222]),
)
)
# bullish candle, close < price < high
np.testing.assert_equal(
split_candle(bull, 20),
(
np.array([1111, 10, 20, 20, 5, 2222]),
np.array([1111, 20, 20, 25, 20, 2222]),
)
)
# bearish candle, price == high
np.testing.assert_equal(
split_candle(bear, 25),
(
np.array([1111, 20, 25, 25, 20, 2222]),
np.array([1111, 25, 10, 25, 5, 2222]),
)
)
# bullish candle, price == low
np.testing.assert_equal(
split_candle(bull, 5),
(
np.array([1111, 10, 5, 10, 5, 2222]),
np.array([1111, 5, 20, 25, 5, 2222]),
)
)
# bearish candle, price == low
np.testing.assert_equal(
split_candle(bear, 5),
(
np.array([1111, 20, 5, 25, 5, 2222]),
np.array([1111, 5, 10, 10, 5, 2222]),
)
)
# bullish candle, price == high
np.testing.assert_equal(
split_candle(bull, 25),
(
np.array([1111, 10, 25, 25, 5, 2222]),
np.array([1111, 25, 20, 25, 20, 2222]),
)
)
# bearish candle, close < price < open
np.testing.assert_equal(
split_candle(bear, 15),
(
np.array([1111, 20, 15, 25, 15, 2222]),
np.array([1111, 15, 10, 15, 5, 2222]),
)
)
# bullish candle, open < price < close
np.testing.assert_equal(
split_candle(bull, 15),
(
np.array([1111, 10, 15, 15, 5, 2222]),
np.array([1111, 15, 20, 25, 15, 2222]),
)
)
| 27.325
| 116
| 0.54323
|
5f4db5a2429a8ca9910e31b182931ed7ab592060
| 2,730
|
py
|
Python
|
app/map_settings.py
|
jsolodev/nettemp
|
e1e9605b853689822f3e9c2a8ae9bf02e266359c
|
[
"MIT"
] | null | null | null |
app/map_settings.py
|
jsolodev/nettemp
|
e1e9605b853689822f3e9c2a8ae9bf02e266359c
|
[
"MIT"
] | null | null | null |
app/map_settings.py
|
jsolodev/nettemp
|
e1e9605b853689822f3e9c2a8ae9bf02e266359c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from app import app
from flask import Flask, request, jsonify, render_template
import sqlite3
from flask_login import login_required
from app.nettemp import nt_settings
from flask_mysqldb import MySQL
mysql = MySQL()
def queryselectsensors():
m = mysql.connection.cursor()
m.execute("SELECT maps.map_id, sensors.name, maps.map_on, maps.display_name, maps.transparent_bkg, maps.background_color, maps.background_low, maps.background_high, maps.font_color, maps.font_size FROM sensors INNER JOIN maps ON sensors.map_id = maps.map_id")
data = m.fetchall()
m.close()
return data
@app.route('/settings/map/settings', methods=['GET','POST'])
@login_required
def settings_map():
if request.method == "POST":
if request.form.get('send-map-image') == 'yes':
map_height = request.form['map_height']
map_width = request.form['map_width']
m = mysql.connection.cursor()
m.execute("UPDATE nt_settings SET value=%s WHERE option='map_width'", (map_width,))
m.execute("UPDATE nt_settings SET value=%s WHERE option='map_height'", (map_height,))
m.connection.commit()
m.close()
if request.form.get('send') == 'yes':
name = request.form['name']
value = request.form['value']
id = request.form['id']
m = mysql.connection.cursor()
if name=='transparent_bkg':
sql = "UPDATE maps SET transparent_bkg=%s WHERE map_id=%s"
if name=='map_on':
sql = "UPDATE maps SET map_on=%s WHERE map_id=%s"
if name=='font_size':
sql = "UPDATE maps SET font_size=%s WHERE map_id=%s"
if name=='font_color':
sql = "UPDATE maps SET font_color=%s WHERE map_id=%s"
if name=='display_name':
sql = "UPDATE maps SET display_name=%s WHERE map_id=%s"
if name=='background_low':
sql = "UPDATE maps SET background_low=%s WHERE map_id=%s"
if name=='background_high':
sql = "UPDATE maps SET background_high=%s WHERE map_id=%s"
if name=='background_color':
sql = "UPDATE maps SET background_color=%s WHERE map_id=%s"
data = (value,id,)
m.execute(sql, data)
m.connection.commit()
m.close()
if request.form.get('send-default') == 'yes':
id = request.form['id']
m = mysql.connection.cursor()
sql = "UPDATE maps SET map_on='on', transparent_bkg='', control_on_map='', display_name='', background_color='', background_low='', background_high='', font_color='', font_size='', icon='' WHERE map_id=%s"
data = [id,]
m.execute(sql, data)
m.connection.commit()
m.close()
data = queryselectsensors()
return render_template('map_settings.html', nt_settings=dict(nt_settings()), data=data)
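# Possible hardening sketch (not wired in; names are illustrative): validate the
# incoming column name against a whitelist before building the UPDATE statement,
# e.g.
#     ALLOWED_COLUMNS = {'transparent_bkg', 'map_on', 'font_size', 'font_color',
#                        'display_name', 'background_low', 'background_high',
#                        'background_color'}
#     if name not in ALLOWED_COLUMNS:
#         raise ValueError('unsupported map setting: %s' % name)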
| 39.565217
| 261
| 0.660073
|
1684393222dfb6170cd4181197c9f83080c2f84c
| 19,432
|
py
|
Python
|
scripts/generate_readme_files.py
|
imhuay/studies-gitbook
|
69a31c20c91d131d0fafce0622f4035b9b95e93a
|
[
"MIT"
] | 100
|
2021-10-13T01:22:27.000Z
|
2022-03-31T09:52:49.000Z
|
scripts/generate_readme_files.py
|
imhuay/studies-gitbook
|
69a31c20c91d131d0fafce0622f4035b9b95e93a
|
[
"MIT"
] | null | null | null |
scripts/generate_readme_files.py
|
imhuay/studies-gitbook
|
69a31c20c91d131d0fafce0622f4035b9b95e93a
|
[
"MIT"
] | 27
|
2021-11-01T01:05:09.000Z
|
2022-03-31T03:32:01.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time: 2021-10-01 11:13 PM
Author: huayang
Subject:
"""
import os
import re
import sys
import json
import inspect
from types import *
from typing import *
from collections import defaultdict, OrderedDict
from dataclasses import dataclass
from pathlib import Path
os.environ['NUMEXPR_MAX_THREADS'] = '8'
WORK_UTILS = (10, 'Work Utils')
PYTORCH_MODELS = (20, 'Pytorch Models')
PYTORCH_UTILS = (30, 'Pytorch Utils')
PYTHON_UTILS = (40, 'Python Utils')
TAG_MAPPING = {
'NLP Utils': WORK_UTILS,
'Image Utils': WORK_UTILS,
'Work Utils': WORK_UTILS,
'Python Utils': PYTHON_UTILS,
'Python 自定义数据结构': PYTHON_UTILS,
'Pytorch Models': PYTORCH_MODELS,
'Pytorch Utils': PYTORCH_UTILS,
'Pytorch Loss': PYTORCH_UTILS,
'Pytorch Train Plugin': PYTORCH_UTILS,
}
class args: # noqa
flag = 'huaytools'
script_path = os.path.dirname(__file__)
repo_path = os.path.abspath(os.path.join(script_path, '..'))
src_path = os.path.join(repo_path, 'src')
algo_path = os.path.join(repo_path, 'algorithms')
prefix_topics = 'topics'
prefix_problems = 'problems'
prefix_notes = '_notes'
problems_path = os.path.join(algo_path, prefix_problems)
notes_path = os.path.join(algo_path, prefix_notes)
topics_path = os.path.join(algo_path, prefix_topics)
sys.path.append(args.src_path)
try:
from huaytools.python.code_analysis import module_iter, slugify
from huaytools.python.file_utils import files_concat
from huaytools.python.utils import get_logger
from huaytools.tools.auto_readme import *
except ImportError:
    raise ImportError('import huaytools error.')
logger = get_logger()
RE_INFO = re.compile(r'<!--(.*?)-->', flags=re.S)
RE_TAG = re.compile(r'Tag: (.*?)\s')
RE_SEP = re.compile(r'[,,、]')
RE_TITLE = re.compile(r'#+\s+(.*?)$')
RE_INDENT = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
beg_details_tmp = '<details><summary><b> {key} <a href="{url}">¶</a></b></summary>\n'
beg_details_cnt_tmp = '<details><summary><b> {key} [{cnt}] <a href="{url}">¶</a></b></summary>\n'
end_details = '\n</details>\n'
auto_line = '<font color="LightGrey"><i> `This README is Auto-generated` </i></font>\n'
def build_tag2topic_map(notes_dir):
file_names = os.listdir(notes_dir)
topic2tags = dict()
for fn in file_names:
topic, _ = os.path.splitext(fn)
txt = open(os.path.join(notes_dir, fn), encoding='utf8').read()
tags = RE_SEP.split(RE_TAG.search(txt).group(1))
topic2tags[topic] = tags
# topic2tags[topic] = topic.split('-')[1].split('、')
tag2topic = {v.lower(): k for k, vs in topic2tags.items() for v in vs}
return tag2topic
# TODO: also add the badge tags above, plus the internal tags, to the entries under `My Code Lab`
def hn_line(line, lv=2):
""""""
return f'{"#" * lv} {line}'
class Algorithms:
""""""
sp_kw = {'合集', '模板', '经典'}
more_info = '更多细分类型'
def __init__(self):
""""""
self.args = args
self.template_name = '*模板'
self.toc_name = self.__class__.__name__
self.prefix_topics = args.prefix_topics
self.prefix_problems = args.prefix_problems
self.prefix_notes = args.prefix_notes
self.prefix_algorithm = os.path.basename(os.path.abspath(args.algo_path))
# print(self.prefix_algo)
self.prefix_algorithm_topics = os.path.join(self.prefix_algorithm, self.prefix_topics)
self.prefix_algorithm_notes = os.path.join(self.prefix_algorithm, self.prefix_notes)
# print(self.prefix_repo)
# args.problems_path = os.path.join(args.algo_path, self.prefix_problems)
# args.notes_path = os.path.join(args.algo_path, self.prefix_notes)
self.tag2topic_map = build_tag2topic_map(args.notes_path)
problems_dt = self.parse_problems()
append_lines = self.gen_topic_md_sorted(problems_dt)
self.content = '\n'.join(append_lines)
diff = set(os.listdir(args.topics_path)) - set(os.listdir(args.notes_path))
assert len(diff) == 0, diff
# algo_path = os.path.join(repo_path, self.prefix)
# fns = sorted([fn for fn in os.listdir(algo_path) if fn.startswith('专题-')])
# toc_lns = [self.toc_head, '---']
# for fn in fns:
# name, _ = os.path.splitext(fn)
# ln = f'- [{name}]({os.path.join(self.prefix, fn)})'
# toc_lns.append(ln)
#
# self.toc = '\n'.join(toc_lns)
def gen_tags_svg(self, tags): # noqa
""""""
lns = []
for idx, (tag, topic) in enumerate(tags.items()):
""""""
# 
lns.append(f'[]({self.get_topic_fn(topic)})')
# lns.append(f'[{tag}](https://img.shields.io/badge/{tag}-lightgray.svg)]')
return '\n'.join(lns)
def get_new_file_name(self, info): # noqa
""""""
src, no, dif, name = info['来源'], info['编号'], info['难度'], info['标题']
return f'{src}_{no}_{dif}_{name}.md'
def parse_problems(self):
""""""
problems_dt = defaultdict(list) # {tag: file_txt_ls}
# files = os.listdir(args.problems_path)
file_iter = []
for prefix, _, files in os.walk(args.problems_path):
for f in files:
fn, ext = os.path.splitext(f)
if ext != '.md' or fn.startswith('-') or fn.startswith('_'):
continue
fp = os.path.join(prefix, f)
suffix = '-'.join(prefix.split('/')[-2:])
file_iter.append((fn, fp, suffix))
        # parse the algorithm tags
for fn, fp, suffix in file_iter:
# fn, _ = os.path.splitext(f)
# fp = os.path.join(args.problems_path, f)
# src, pid, lv, pn = fn.rsplit('_', maxsplit=3)
fp = Path(fp)
txt = open(fp, encoding='utf8').read()
info_ret = RE_INFO.search(txt)
if not info_ret:
print(fn, fp, suffix)
continue
try:
info = json.loads(info_ret.group(1))
except:
raise ValueError(f'{fp}')
            # rename if needed
new_file_name = self.get_new_file_name(info)
if new_file_name != fp.name:
logger.info(f'rename {fp.name} to {new_file_name}')
fp = fp.rename(fp.parent / new_file_name)
command_ln = f'git add "{fp}"'
logger.info(command_ln)
os.system(command_ln)
src, pid, lv, pn = info['来源'], info['编号'], info['难度'], info['标题']
tag_append = [src] # if src != self.template_name else []
# tags = RE_SEP.split(RE_TAG.search(txt).group(1)) + tag_append
tags = info['tags'] + tag_append
tags = [tag.strip() for tag in tags]
tag2topic = {tag: self.tag2topic_map[tag.lower()] for tag in tags}
topics = list(tag2topic.values())
pid = f'{pid}' if pid.isnumeric() else pid
head = f'`{src} {pid} {pn} ({lv}, {suffix})`'
lines = txt.split('\n')
# lines[0] = f'### {head}'
lines.insert(0, '')
lines.insert(0, self.gen_tags_svg(tag2topic))
lines.insert(0, '')
lines.insert(0, f'### {head}')
txt = '\n'.join(lines)
txt = txt.rstrip().replace(r'../../../_assets', '../_assets') + '\n\n---\n'
for topic in topics:
problems_dt[topic].append((head, txt))
for k, v in problems_dt.items():
problems_dt[k] = sorted(v)
problems_dt = OrderedDict(sorted(problems_dt.items()))
return problems_dt
@staticmethod
def get_topic_fn(tag):
return f'{tag}.md'
def gen_topic_md_sorted(self, problems_dt):
"""生成算法专题md,对主页topics排序"""
readme_lines = [self.toc_name, '===\n', auto_line]
append_lines = [self.toc_name, '---']
append_blocks = []
# problems_index_ln = 'Problems Index'
problems_index_ln = 'Problems'
for tag, problems_txts in problems_dt.items(): # noqa
""""""
append_tmp = []
topic_fn = self.get_topic_fn(tag)
topic_name, _ = os.path.splitext(topic_fn)
index_lines = [problems_index_ln, '---']
# index_lines = []
# readme_lines.append(f'- [{topic_fn}]({topic_fn}.md)')
# append_lines.append(f'- [{topic_fn}]({self.prefix}/{topic_fn}.md)')
algo_url = os.path.join(self.prefix_topics, topic_fn)
repo_url = os.path.join(self.prefix_algorithm_topics, topic_fn)
problems_cnt = len(problems_txts)
readme_lines.append(beg_details_cnt_tmp.format(key=topic_name, url=algo_url, cnt=problems_cnt))
# append_lines.append(beg_details_tmp.format(key=topic_name, url=repo_url))
append_tmp.append(beg_details_cnt_tmp.format(key=topic_name, url=repo_url, cnt=problems_cnt))
contents = []
for (head, txt) in problems_txts:
# head = fn
# link = self.parse_head(txt)
link = slugify(head)
contents.append(txt)
index_lines.append(f'- [{head}](#{link})')
readme_lines.append(f'- [{head}]({algo_url}#{link})')
# append_lines.append(f'- [{head}]({repo_url}#{link})')
append_tmp.append(f'- [{head}]({repo_url}#{link})')
readme_lines.append(end_details)
# append_lines.append(end_details)
append_tmp.append(end_details)
index_lines.append('\n---')
topic_main_lines = open(os.path.join(args.repo_path, self.prefix_algorithm_notes, topic_fn),
encoding='utf8').read().rstrip().split('\n')
# topic_main_lines.insert(0, f'[{problems_index_ln}](#{slugify(problems_index_ln)})\n')
topic_main_lines.insert(0, f'# {tag.split("-")[1]}\n')
topic_main = '\n'.join(topic_main_lines)
topic_main_toc = '\n'.join(index_lines)
topic_content = '\n'.join(contents)
f_out = os.path.join(args.repo_path, self.prefix_algorithm_topics, topic_fn)
content = files_concat([topic_main, topic_main_toc, topic_content], '\n')
fw_helper.write(f_out, content)
# topic_type = topic_name.split('-')[0]
# append_blocks.append((append_tmp, topic_type, problems_cnt))
append_blocks.append((append_tmp, topic_name, problems_cnt))
# with open(os.path.join(args.algo_path, 'README.md'), 'w', encoding='utf8') as fw:
# fw.write('\n'.join(readme_lines))
fw_helper.write(os.path.join(args.algo_path, 'README.md'), '\n'.join(readme_lines))
# append_blocks = sorted(append_blocks, key=lambda x: (x[1], -x[2]))
def block_assert(_block):
return any(kw in _block[0] for kw in self.sp_kw)
append_blocks = sorted(append_blocks)
for it in append_blocks:
block = it[0]
if block_assert(block):
append_lines += block
append_lines.append('<details><summary><b>{more_info} ...<a href="{url}">¶</a></b></summary>\n'.format(
more_info=self.more_info,
url=f'{self.prefix_algorithm}/README.md'
))
for it in append_blocks:
block = it[0]
if not block_assert(block):
append_lines += block
append_lines.append(end_details)
# append_lines.append(f'- [All Topics]({self.prefix_algo}/README.md)')
return append_lines
@staticmethod
def parse_head(txt):
""""""
        # parse the title
try:
head = RE_TITLE.search(txt.split('\n', maxsplit=1)[0]).group(1)
except:
raise Exception('parsing head error!')
return head
class Codes:
""""""
@dataclass()
class DocItem:
""" 每个 docstring 需要提取的内容 """
flag: Tuple
summary: str
content: str
module_path: str
line_no: int
link: str = None
def __post_init__(self):
self.link = f'[source]({self.module_path}#L{self.line_no})'
def get_block(self, prefix=''):
""""""
block = f'### {self.summary}\n'
block += f'> [source]({os.path.join(prefix, self.module_path)}#L{self.line_no})\n\n'
# block += f'<details><summary><b> Intro & Example </b></summary>\n\n'
block += '```python\n'
block += f'{self.content}'
block += '```\n'
# block += '\n</details>\n'
return block
def __init__(self):
""""""
# self.code_path = args.code_path
# print(self.code_path)
self.code_readme_path = os.path.join(args.src_path, 'README.md')
self.toc_name = self.__class__.__name__
docs_dt = self.parse_docs()
self.code_basename = os.path.basename(os.path.abspath(args.src_path))
self.content = self.gen_readme_md_simply(docs_dt)
def parse_docs(self):
""" 生成 readme for code """
docs_dt = defaultdict(list)
sys.path.append(args.repo_path)
for module in module_iter(args.src_path):
if hasattr(module, '__all__'):
# print(module.__name__)
for obj_str in module.__all__:
obj = getattr(module, obj_str)
if isinstance(obj, (ModuleType, FunctionType, type)) \
and getattr(obj, '__doc__') \
and obj.__doc__.startswith('@'):
# print(obj.__name__)
doc = self.parse_doc(obj)
docs_dt[doc.flag].append(doc)
return docs_dt
def parse_doc(self, obj) -> DocItem:
""""""
raw_doc = obj.__doc__
lines = raw_doc.split('\n')
flag = TAG_MAPPING[lines[0][1:]]
lines = lines[1:]
min_indent = self.get_min_indent('\n'.join(lines))
lines = [ln[min_indent:] for ln in lines]
summary = f'`{obj.__name__}: {lines[0]}`'
content = '\n'.join(lines)
line_no = self.get_line_number(obj)
module_path = self.get_module_path(obj)
return self.DocItem(flag, summary, content, module_path, line_no)
@staticmethod
def get_line_number(obj):
""" 获取对象行号
基于正则表达式,所以不一定保证准确
"""
return inspect.findsource(obj)[1] + 1
@staticmethod
def get_module_path(obj):
abs_url = inspect.getmodule(obj).__file__
dirs = abs_url.split('/')
        idx = dirs[::-1].index(args.flag)  # find the package folder *from back to front*; only this position is basically fixed
        return '/'.join(dirs[-(idx + 1):])  # then keep the path from that folder onward (relative to its parent directory)
@staticmethod
def get_min_indent(s):
"""Return the minimum indentation of any non-blank line in `s`"""
indents = [len(indent) for indent in RE_INDENT.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
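    # e.g. get_min_indent("    a\n  b") == 2 - the smallest run of leading spaces
    # across non-blank lines matched by RE_INDENT above.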
def gen_readme_md_simply(self, docs_dt: Dict[str, List[DocItem]]):
""" 简化首页的输出 """
# args = self.args
# code_prefix = os.path.basename(os.path.abspath(args.code_path))
# print(code_prefix)
toc = [self.toc_name, '---']
append_toc = [self.toc_name, '---']
readme_lines = []
# append_lines = []
key_sorted = sorted(docs_dt.keys())
for key in key_sorted:
blocks = docs_dt[key]
key = key[1]
toc.append(beg_details_tmp.format(key=key, url=f'#{slugify(key)}'))
# append_toc.append(beg_details_tmp.format(key=key, url=f'{self.code_basename}/README.md#{slugify(key)}'))
append_toc.append('### {key} [¶]({url})\n'.format(key=key,
url=f'{self.code_basename}/README.md#{slugify(key)}'))
readme_lines.append(hn_line(key, 2))
# append_lines.append(hn_line(key, 2))
for d in blocks:
toc.append(f'- [{d.summary}](#{slugify(d.summary)})')
append_toc.append(f'- [{d.summary}]({self.code_basename}/README.md#{slugify(d.summary)})')
readme_lines.append(d.get_block())
# append_lines.append(d.get_block(prefix=code_prefix))
toc.append(end_details)
# append_toc.append(end_details)
append_toc.append('\n')
toc_str = '\n'.join(toc[:2] + [auto_line] + toc[2:])
sep = '\n---\n\n'
content_str = '\n\n'.join(readme_lines)
code_readme = toc_str + sep + content_str
# with open(self.code_readme_path, 'w', encoding='utf8') as fw:
# fw.write(code_readme)
fw_helper.write(self.code_readme_path, code_readme)
append_toc_str = '\n'.join(append_toc)
main_append = append_toc_str + sep # + '\n\n'.join(append_lines)
return main_append
def get_repo_toc(*toc_parts):
""""""
lns = ['Repo Index', '---']
for part in toc_parts:
name = part.toc_name
lns.append(f'- [{name}](#{slugify(name)})')
return '\n'.join(lns)
# TOTAL_ADD = 0
# def file_write_helper(abspath, content):
# """"""
# global TOTAL_ADD
#
# old_content = ''
# if os.path.exists(abspath):
# old_content = open(abspath, encoding='utf8').read()
#
# if old_content != content:
# with open(abspath, 'w', encoding='utf8') as fw:
# fw.write(content)
#
# command_ln = f'git add "{abspath}"'
# logger.info(command_ln)
# os.system(command_ln)
# TOTAL_ADD += 1
def pipeline():
""""""
# args = simple_argparse()
args.repo_readme_path = os.path.join(args.repo_path, r'README.md')
# if os.path.exists(args.repo_readme_path):
# readme_old = open(args.repo_readme_path, encoding='utf8').read()
# else:
# readme_old = ''
# code_toc, code_append = gen_code_readme(args)
parts = [
Algorithms(),
Notes('../notes'),
Papers('../papers'),
Books('../books'),
Codes()
]
repo_toc = get_repo_toc(*parts)
readme_main_path = os.path.join(args.repo_path, r'README-main.md')
main_auto_line = '<font color="LightGrey"><i> `The following is Auto-generated` </i></font>'
content = files_concat(src_in=[readme_main_path,
# main_auto_line,
repo_toc] + [it.content for it in parts],
sep='\n---\n\n')
fw_helper.write(args.repo_readme_path, content)
# readme = open(args.repo_readme_path, encoding='utf8').read()
# if readme_old != readme:
print(fw_helper.add_cnt)
if __name__ == '__main__':
""""""
pipeline()
# if len(sys.argv) > 1:
# pipeline()
# # print('SUCCESS')
# else:
    #     # suppress stdout; only print WARNING messages
# # sys.stdout = open(os.devnull, 'w')
# command = "generate_readme_files.py " \
# "--repo_path ../ " \
# "--code_path ../code/ " \
# "--algo_path ../algorithm/ "
# sys.argv = command.split()
# _test()
| 34.886894
| 118
| 0.566025
|
f21de0a60f2f7425efa6a92d30c440a09ceb9763
| 1,524
|
py
|
Python
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.feature_column.sequence_feature_column import SequenceFeatures
from tensorflow.python.keras.layers.recurrent import PeepholeLSTMCell
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import CosineDecay
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import CosineDecayRestarts
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import LinearCosineDecay
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import NoisyLinearCosineDecay
from tensorflow.python.keras.premade.linear import LinearModel
from tensorflow.python.keras.premade.wide_deep import WideDeepModel
from tensorflow.python.keras.saving.saved_model_experimental import export_saved_model
from tensorflow.python.keras.saving.saved_model_experimental import load_from_saved_model
from tensorflow.python.keras.utils.data_utils import terminate_keras_multiprocessing_pools
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
_sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
_sys.modules[__name__], "keras.experimental", public_apis=None, deprecation=True,
has_lite=False)
| 50.8
| 94
| 0.862861
|
ceedc1d8f4069c705bf35f92a35c4b7ac41a2e63
| 6,945
|
py
|
Python
|
neutron/tests/unit/db/test_ipam_backend_mixin.py
|
ISCAS-VDI/neutron-base
|
687f03d7131839ae8bc324d5823194d1245bb050
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/db/test_ipam_backend_mixin.py
|
ISCAS-VDI/neutron-base
|
687f03d7131839ae8bc324d5823194d1245bb050
|
[
"Apache-2.0"
] | 3
|
2015-02-27T00:48:55.000Z
|
2015-04-21T05:29:37.000Z
|
neutron/tests/unit/db/test_ipam_backend_mixin.py
|
ISCAS-VDI/neutron-base
|
687f03d7131839ae8bc324d5823194d1245bb050
|
[
"Apache-2.0"
] | 3
|
2015-02-26T00:55:17.000Z
|
2020-03-01T17:05:40.000Z
|
# Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants
from neutron.common import constants as n_const
from neutron.db import ipam_backend_mixin
from neutron.tests import base
class TestIpamBackendMixin(base.BaseTestCase):
def setUp(self):
super(TestIpamBackendMixin, self).setUp()
self.mixin = ipam_backend_mixin.IpamBackendMixin()
self.ctx = mock.Mock()
self.default_new_ips = (('id-1', '192.168.1.1'),
('id-2', '192.168.1.2'))
self.default_original_ips = (('id-1', '192.168.1.1'),
('id-5', '172.20.16.5'))
self.owner_non_router = constants.DEVICE_OWNER_DHCP
self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF
def _prepare_ips(self, ips):
return [{'ip_address': ip[1],
'subnet_id': ip[0]} for ip in ips]
def _mock_slaac_subnet_on(self):
slaac_subnet = {'ipv6_address_mode': n_const.IPV6_SLAAC,
'ipv6_ra_mode': n_const.IPV6_SLAAC}
self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet)
def _mock_slaac_subnet_off(self):
non_slaac_subnet = {'ipv6_address_mode': None,
'ipv6_ra_mode': None}
self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet)
def _test_get_changed_ips_for_port(self, expected_change, original_ips,
new_ips, owner):
change = self.mixin._get_changed_ips_for_port(self.ctx,
original_ips,
new_ips,
owner)
self.assertEqual(expected_change, change)
def test__get_changed_ips_for_port(self):
new_ips = self._prepare_ips(self.default_new_ips)
original_ips = self._prepare_ips(self.default_original_ips)
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=[original_ips[0]],
remove=[original_ips[1]])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_router)
def test__get_changed_ips_for_port_autoaddress(self):
new_ips = self._prepare_ips(self.default_new_ips)
original = (('id-1', '192.168.1.1'),
('id-5', '2000:1234:5678::12FF:FE34:5678'))
original_ips = self._prepare_ips(original)
self._mock_slaac_subnet_on()
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=original_ips,
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def test__get_changed_ips_for_port_autoaddress_ipv6_pd_enabled(self):
owner_not_router = constants.DEVICE_OWNER_DHCP
new_ips = self._prepare_ips(self.default_new_ips)
original = (('id-1', '192.168.1.1'),
('id-5', '2000:1234:5678::12FF:FE34:5678'))
original_ips = self._prepare_ips(original)
# mock to test auto address part
pd_subnet = {'subnetpool_id': constants.IPV6_PD_POOL_ID,
'ipv6_address_mode': n_const.IPV6_SLAAC,
'ipv6_ra_mode': n_const.IPV6_SLAAC}
self.mixin._get_subnet = mock.Mock(return_value=pd_subnet)
# make a copy of original_ips
# since it is changed by _get_changed_ips_for_port
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=[original_ips[0]],
remove=[original_ips[1]])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, owner_not_router)
def _test_get_changed_ips_for_port_no_ip_address(self):
# IP address should be added if only subnet_id is provided,
# independently from auto_address status for subnet
new_ips = [{'subnet_id': 'id-3'}]
original_ips = []
expected_change = self.mixin.Changes(add=[new_ips[0]],
original=[],
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def test__get_changed_ips_for_port_no_ip_address_no_slaac(self):
self._mock_slaac_subnet_off()
self._test_get_changed_ips_for_port_no_ip_address()
def test__get_changed_ips_for_port_no_ip_address_slaac(self):
self._mock_slaac_subnet_on()
self._test_get_changed_ips_for_port_no_ip_address()
def test__is_ip_required_by_subnet_for_router_port(self):
# Owner -> router:
# _get_subnet should not be called,
# expected True
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_router)
self.assertTrue(result)
self.assertFalse(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port(self):
# Owner -> not router:
# _get_subnet should be called,
# expected True, because subnet is not slaac
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertTrue(result)
self.assertTrue(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self):
# Owner -> not router:
# _get_subnet should be called,
# expected False, because subnet is slaac
self._mock_slaac_subnet_on()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertFalse(result)
self.assertTrue(self.mixin._get_subnet.called)
| 43.679245
| 78
| 0.60216
|
d0fc0ddc218975c2f834a46846e57cd8693af066
| 384
|
py
|
Python
|
learn/control/random.py
|
wuyou33/dynamicslearn
|
93589c7a39a5e6afb7f4d8a6ef6d4e35c6f436c5
|
[
"MIT"
] | 1
|
2019-11-30T02:11:24.000Z
|
2019-11-30T02:11:24.000Z
|
learn/control/random.py
|
wuyou33/dynamicslearn
|
93589c7a39a5e6afb7f4d8a6ef6d4e35c6f436c5
|
[
"MIT"
] | null | null | null |
learn/control/random.py
|
wuyou33/dynamicslearn
|
93589c7a39a5e6afb7f4d8a6ef6d4e35c6f436c5
|
[
"MIT"
] | null | null | null |
from .controller import Controller
class RandomController(Controller):
def __init__(self, env, controller_cfg):
self.env = env
self.cfg = controller_cfg
def reset(self):
print("Resetting Random Controller Not Needed, but passed")
return
def get_action(self, state):
action = self.env.action_space.sample()
return action
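# Minimal usage sketch (assumption: `env` exposes a gym-style `action_space`):
#     controller = RandomController(env, controller_cfg=None)
#     action = controller.get_action(state=None)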
| 24
| 67
| 0.664063
|
466cbd7a2c0e2137a3fde3b6b349e2924084acf6
| 151
|
py
|
Python
|
Facebooker/urls.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 26
|
2020-05-29T02:41:05.000Z
|
2022-03-25T17:27:32.000Z
|
Facebooker/urls.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 7
|
2020-05-28T06:09:22.000Z
|
2021-10-02T05:22:38.000Z
|
Facebooker/urls.py
|
gpwork4u/Facebooker
|
4a78c9575d5f36a402d7e489b69058d4e1692ce4
|
[
"MIT"
] | 9
|
2020-05-28T05:40:25.000Z
|
2022-02-13T21:48:01.000Z
|
MBASIC_FACEBOOK = 'https://mbasic.facebook.com/'
M_FACEBOOK = 'https://m.facebook.com/'
def join(*args):
slash = '/'
return slash.join(args)
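# e.g. join('https://mbasic.facebook.com', 'login.php') == 'https://mbasic.facebook.com/login.php'
# (the path segment here is only an illustration)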
| 18.875
| 48
| 0.655629
|
ddf290b7c9cf12f63ec2e85b56e9eeff913f9d5a
| 16,071
|
py
|
Python
|
opennem/pipelines/nem/mms.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 22
|
2020-06-30T05:27:21.000Z
|
2022-02-21T12:13:51.000Z
|
opennem/pipelines/nem/mms.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 71
|
2020-08-07T13:06:30.000Z
|
2022-03-15T06:44:49.000Z
|
opennem/pipelines/nem/mms.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 13
|
2020-06-30T03:28:32.000Z
|
2021-12-30T08:17:16.000Z
|
import json
import logging
from datetime import datetime
from typing import Optional
from scrapy.exceptions import DropItem
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.sql import text
from opennem.core.dispatch_type import DispatchType, parse_dispatch_type
from opennem.core.facilitystations import facility_station_join_by_name
from opennem.core.fueltechs import lookup_fueltech
from opennem.core.normalizers import (
clean_capacity,
name_normalizer,
normalize_aemo_region,
normalize_duid,
normalize_string,
participant_name_filter,
station_name_cleaner,
)
from opennem.core.station_duid_map import facility_has_station_remap, facility_map_station
from opennem.db.models.opennem import Facility, FacilityStatus
from opennem.db.models.opennem import Participant as ParticipantModel
from opennem.db.models.opennem import Station
from opennem.pipelines import DatabaseStoreBase
from opennem.schema.opennem import ParticipantSchema
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class NemMMSSingle(DatabaseStoreBase):
"""
"""
def get_table(self, item):
if "tables" not in item:
logger.error(item)
raise Exception("No tables passed to pipeline")
table_names = [i["name"] for i in item["tables"]]
if self.table not in table_names:
logger.debug(
"Skipping %s pipeline step as table %s not processed",
self.__class__,
self.table,
)
return False
table = [
i
for i in item["tables"]
if "name" in i and i["name"] == self.table
]
return table.pop() if len(table) else None
class NemStoreMMSStations(DatabaseStoreBase):
"""
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
records_updated = 0
records_created = 0
for record in item:
created = False
duid = normalize_duid(record["STATIONID"])
name = station_name_cleaner(record["STATIONNAME"])
network_name = normalize_string(record["STATIONNAME"])
address1 = normalize_string(record["ADDRESS1"])
address2 = normalize_string(record["ADDRESS2"])
city = normalize_string(record["CITY"])
state = normalize_string(record["STATE"]).capitalize()
postcode = normalize_string(record["POSTCODE"])
station = (
s.query(Station)
.filter(Station.network_code == duid)
.one_or_none()
)
if not station:
station = Station(
code=duid,
network_code=duid,
created_by="au.nem.mms.stations",
)
records_created += 1
created = True
else:
station.updated_by = "au.nem.mms.stations"
records_updated += 1
station.name = name
station.network_id = "NEM"
station.network_name = network_name
station.address1 = address1
station.address2 = address2
station.locality = city
station.state = state
station.postcode = postcode
try:
s.add(station)
s.commit()
except Exception as e:
logger.error(e)
logger.debug(
"{} station record with id {}".format(
"Created" if created else "Updated", duid
)
)
logger.info(
"Created {} records and updated {}".format(
records_created, records_updated
)
)
class NemStoreMMSStationStatus(DatabaseStoreBase):
"""
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
for record in item:
duid = normalize_duid(record["STATIONID"])
# authorized_date = name_normalizer(record["AUTHORISEDDATE"])
# @TODO this needs to be mapped to v3 state
status = record["STATUS"]
station = (
s.query(Station)
.filter(Station.network_code == duid)
.one_or_none()
)
if not station:
logger.error("Could not find station {}".format(duid))
continue
# @TODO station statuses -> facilities should be
# set to retired if active
try:
s.add(station)
s.commit()
except Exception as e:
logger.error(e)
class NemStoreMMSParticipant(DatabaseStoreBase):
"""
@NOTE This pipeline has been converted to use pydantic models
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
records_updated = 0
records_created = 0
q = self.engine.execute(text("select code from participant"))
participant_codes = list(set([i[0] for i in q.fetchall()]))
records = item
for record in records:
created = False
if not "NAME" in record or not "PARTICIPANTID" in record:
logger.error(record)
raise Exception(
"Invalid MMS participant record: {}".format(record)
)
participant_schema = None
try:
participant_schema = ParticipantSchema(
**{
"code": record["PARTICIPANTID"],
"name": record["NAME"],
"network_name": record["NAME"],
}
)
except Exception:
logger.error(
"Validation error with record: {}".format(record["NAME"])
)
continue
# pid = normalize_duid(record["PARTICIPANTID"])
# name = normalize_string(record["NAME"])
# name_clean = participant_name_filter(record["NAME"])
participant = (
s.query(ParticipantModel)
.filter(ParticipantModel.code == participant_schema.code)
.one_or_none()
)
if not participant:
participant = ParticipantModel(
**{
**participant_schema.dict(),
"created_by": "au.nem.mms.participant",
}
)
records_created += 1
created = True
else:
participant.name = participant_schema.name
participant.network_name = participant_schema.network_name
records_updated += 1
try:
s.add(participant)
s.commit()
except Exception as e:
logger.error(e)
logger.debug(
"{} participant record with id {}".format(
"Created" if created else "Updated",
participant_schema.code,
)
)
logger.info(
"Created {} records and updated {}".format(
records_created, records_updated
)
)
class NemStoreMMSDudetail(DatabaseStoreBase):
"""
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
records_updated = 0
records_created = 0
for record in item:
created = False
duid = normalize_duid(record["DUID"])
capacity_registered = clean_capacity(record["REGISTEREDCAPACITY"])
capacity_max = clean_capacity(record["MAXCAPACITY"])
dispatch_type = parse_dispatch_type(record["DISPATCHTYPE"])
facility = (
s.query(Facility)
.filter(Facility.network_code == duid)
.one_or_none()
)
if not facility:
facility = Facility(
code=duid,
network_code=duid,
status_id="retired",
dispatch_type=dispatch_type,
created_by="au.nem.mms.dudetail",
)
records_created += 1
created = True
else:
facility.updated_by = "au.nem.mms.dudetail"
records_updated += 1
facility.capacity_registered = capacity_registered
facility.capacity_max = capacity_max
try:
s.add(facility)
s.commit()
except Exception as e:
logger.error(e)
logger.debug(
"MMS Dudetail: {} facility record with id {}".format(
"Created" if created else "Updated", duid
)
)
logger.info(
"MMS Dudetail:Created {} facility records and updated {}".format(
records_created, records_updated
)
)
class NemStoreMMSDudetailSummary(DatabaseStoreBase):
"""
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
records_updated = 0
records_created = 0
for record in item:
created = False
participant_code = normalize_duid(
record["facilities"][0]["PARTICIPANTID"]
)
# Step 1. Find participant by code or create
participant = (
s.query(ParticipantModel)
.filter(ParticipantModel.code == participant_code)
.one_or_none()
)
if not participant:
participant = ParticipantModel(
code=participant_code,
network_code=participant_code,
created_by="au.nem.mms.dudetail_summary",
)
logger.debug("Created participant {}".format(participant_code))
else:
participant.updated_by = "au.nem.mms.dudetail_summary"
# Step 3. now create the facilities and associate
for facility_record in record["facilities"]:
duid = normalize_duid(facility_record["DUID"])
station_code = facility_map_station(
duid, normalize_duid(record["id"])
)
network_region = normalize_aemo_region(
facility_record["REGIONID"]
)
date_start = facility_record["date_start"]
date_end = facility_record["date_end"]
facility_state = "retired"
# Step 2. Find station or create
station = (
s.query(Station)
.filter(Station.network_code == station_code)
.one_or_none()
)
if not station:
station = Station(
code=station_code,
network_code=station_code,
network_id="NEM",
created_by="au.nem.mms.dudetail_summary",
)
logger.debug("Created station {}".format(station_code))
else:
station.updated_by = "au.nem.mms.dudetail_summary"
station.participant = participant
                if date_end is None:
facility_state = "operating"
if not "DISPATCHTYPE" in facility_record:
logger.error(
"MMS dudetailsummary: Invalid record: {}".format(
facility_record
)
)
continue
dispatch_type = parse_dispatch_type(
facility_record["DISPATCHTYPE"]
)
facility = (
s.query(Facility)
.filter(Facility.network_code == duid)
.one_or_none()
)
if not facility:
facility = Facility(
code=duid,
network_code=duid,
dispatch_type=dispatch_type,
created_by="au.nem.mms.dudetail_summary",
)
records_created += 1
created = True
else:
facility.updated_by = "au.nem.mms.dudetail_summary"
records_updated += 1
facility.network_region = network_region
facility.deregistered = date_end
facility.registered = date_start
facility.status_id = facility_state
if not facility.dispatch_type:
facility.dispatch_type = dispatch_type
# Associations
facility_station_id = facility_map_station(duid, station.id)
facility.station_id = station.id
try:
s.add(facility)
s.commit()
except Exception as e:
logger.error(e)
logger.debug(
"MMS DudetailSummary:{} facility record with id {}".format(
"Created" if created else "Updated", duid
)
)
logger.info(
"MMS DudetailSummary: Created {} facility records and updated {}".format(
records_created, records_updated
)
)
class NemStoreMMSStatdualloc(DatabaseStoreBase):
"""
AEMO MMS associates all duids with station ids
"""
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
records_updated = 0
records_created = 0
for record in item:
created = False
duid = normalize_duid(record["DUID"])
station_code = facility_map_station(
duid, normalize_duid(record["STATIONID"])
)
station = (
s.query(Station)
.filter(Station.network_code == station_code)
.one_or_none()
)
facility = (
s.query(Facility)
.filter(Facility.network_code == duid)
.one_or_none()
)
if not station:
station = Station(
code=station_code,
network_code=station_code,
network_id="NEM",
created_by="au.nem.mms.statdualloc",
)
if not facility:
facility = Facility(
code=duid,
network_code=duid,
network_id="NEM",
status_id="retired",
created_by="au.nem.mms.statdualloc",
)
records_created += 1
created = True
else:
facility.updated_by = "au.nem.mms.statdualloc"
records_updated += 1
facility.station = station
try:
s.add(facility)
s.commit()
except Exception as e:
logger.error(e)
logger.debug(
"{} facility record with id {}".format(
"Created" if created else "Updated", duid
)
)
logger.info(
"Created {} facility records and updated {}".format(
records_created, records_updated
)
)
| 29.7061
| 90
| 0.508058
|
f295011e2afc04f4a3e8aac8b25c5de976d3a3bd
| 19,837
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_custom_vm_commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2019-12-12T19:55:26.000Z
|
2019-12-12T19:55:26.000Z
|
src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_custom_vm_commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 2
|
2021-01-15T09:24:07.000Z
|
2021-01-15T09:30:10.000Z
|
src/azure-cli/azure/cli/command_modules/vm/tests/latest/test_custom_vm_commands.py
|
psignoret/azure-cli
|
1a4a043750315f9a7f2894b4287126089978b615
|
[
"MIT"
] | 1
|
2019-12-11T06:00:10.000Z
|
2019-12-11T06:00:10.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import mock
from knack.util import CLIError
from azure.cli.command_modules.vm.custom import (enable_boot_diagnostics, disable_boot_diagnostics,
_merge_secrets, BootLogStreamWriter,
_get_access_extension_upgrade_info,
_LINUX_ACCESS_EXT,
_WINDOWS_ACCESS_EXT,
_get_extension_instance_name,
get_boot_log)
from azure.cli.command_modules.vm.custom import \
(attach_unmanaged_data_disk, detach_data_disk, get_vmss_instance_view)
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import AzCliCommand
from azure.cli.command_modules.vm.disk_encryption import (encrypt_vm, decrypt_vm, encrypt_vmss, decrypt_vmss)
from azure.cli.core.profiles import get_sdk, ResourceType
from azure.cli.core.mock import DummyCli
NetworkProfile, StorageProfile, DataDisk, OSDisk, OperatingSystemTypes, InstanceViewStatus, \
VirtualMachineExtensionInstanceView, VirtualMachineExtension, ImageReference, DiskCreateOptionTypes, \
CachingTypes = get_sdk(DummyCli(), ResourceType.MGMT_COMPUTE, 'NetworkProfile', 'StorageProfile', 'DataDisk', 'OSDisk',
'OperatingSystemTypes', 'InstanceViewStatus', 'VirtualMachineExtensionInstanceView',
'VirtualMachineExtension', 'ImageReference', 'DiskCreateOptionTypes',
'CachingTypes',
mod='models', operation_group='virtual_machines') # FIXME split into loading by RT
def _get_test_cmd():
cli_ctx = DummyCli()
loader = AzCommandsLoader(cli_ctx, resource_type=ResourceType.MGMT_COMPUTE)
cmd = AzCliCommand(loader, 'test', None)
cmd.command_kwargs = {'resource_type': ResourceType.MGMT_COMPUTE, 'operation_group': 'virtual_machines'}
cmd.cli_ctx = cli_ctx
return cmd
class TestVmCustom(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def test_get_access_extension_upgrade_info(self):
# when there is no extension installed on linux vm, use the version we like
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
None, _LINUX_ACCESS_EXT)
self.assertEqual('Microsoft.OSTCExtensions', publisher)
self.assertEqual('1.5', version)
self.assertEqual(None, auto_upgrade)
# when there is no extension installed on windows vm, use the version we like
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
None, _WINDOWS_ACCESS_EXT)
self.assertEqual('Microsoft.Compute', publisher)
self.assertEqual('2.0', version)
self.assertEqual(None, auto_upgrade)
# when there is existing extension with higher version, stick to that
extentions = [FakedAccessExtensionEntity(True, '3.0')]
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
extentions, _LINUX_ACCESS_EXT)
self.assertEqual('3.0', version)
self.assertEqual(None, auto_upgrade)
extentions = [FakedAccessExtensionEntity(False, '10.0')]
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
extentions, _WINDOWS_ACCESS_EXT)
self.assertEqual('10.0', version)
self.assertEqual(None, auto_upgrade)
# when there is existing extension with lower version, upgrade to ours
extentions = [FakedAccessExtensionEntity(True, '1.0')]
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
extentions, _LINUX_ACCESS_EXT)
self.assertEqual('1.5', version)
self.assertEqual(True, auto_upgrade)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_get):
vm_fake = mock.MagicMock()
cmd = _get_test_cmd()
mock_vm_get.return_value = vm_fake
enable_boot_diagnostics(cmd, 'g1', 'vm1', 'https://storage_uri1')
self.assertTrue(vm_fake.diagnostics_profile.boot_diagnostics.enabled)
self.assertEqual('https://storage_uri1',
vm_fake.diagnostics_profile.boot_diagnostics.storage_uri)
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_enable_boot_diagnostics_skip_when_enabled_already(self, mock_vm_set, mock_vm_get):
vm_fake = mock.MagicMock()
cmd = _get_test_cmd()
mock_vm_get.return_value = vm_fake
vm_fake.diagnostics_profile.boot_diagnostics.enabled = True
vm_fake.diagnostics_profile.boot_diagnostics.storage_uri = 'https://storage_uri1'
enable_boot_diagnostics(cmd, 'g1', 'vm1', 'https://storage_uri1')
self.assertTrue(mock_vm_get.called)
self.assertFalse(mock_vm_set.called)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get):
vm_fake = mock.MagicMock()
cmd = _get_test_cmd()
mock_vm_get.return_value = vm_fake
vm_fake.diagnostics_profile.boot_diagnostics.enabled = True
vm_fake.diagnostics_profile.boot_diagnostics.storage_uri = 'storage_uri1'
disable_boot_diagnostics(cmd, 'g1', 'vm1')
self.assertFalse(vm_fake.diagnostics_profile.boot_diagnostics.enabled)
self.assertIsNone(vm_fake.diagnostics_profile.boot_diagnostics.storage_uri)
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get):
# pylint: disable=line-too-long
        faked_vhd_uri = 'https://your_storage_account_name.blob.core.windows.net/vhds/d1.vhd'
# stub to get the vm which has no datadisks
vm = FakedVM(None, None)
cmd = _get_test_cmd()
mock_vm_get.return_value = vm
# execute
attach_unmanaged_data_disk(cmd, 'rg1', 'vm1', True, faked_vhd_uri)
# assert
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm)
self.assertEqual(len(vm.storage_profile.data_disks), 1)
data_disk = vm.storage_profile.data_disks[0]
self.assertIsNone(data_disk.caching)
self.assertEqual(data_disk.create_option, DiskCreateOptionTypes.empty)
self.assertIsNone(data_disk.image)
self.assertEqual(data_disk.lun, 0)
self.assertTrue(data_disk.name.startswith('vm1-'))
self.assertEqual(data_disk.vhd.uri, faked_vhd_uri)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get):
# pylint: disable=line-too-long
        faked_vhd_uri = 'https://your_storage_account_name.blob.core.windows.net/vhds/d1.vhd'
        faked_vhd_uri2 = 'https://your_storage_account_name.blob.core.windows.net/vhds/d2.vhd'
# stub to get the vm which has no datadisks
existing_disk = DataDisk(lun=1, vhd=faked_vhd_uri, name='d1', create_option=DiskCreateOptionTypes.empty)
vm = FakedVM(None, [existing_disk])
cmd = _get_test_cmd()
mock_vm_get.return_value = vm
# execute
attach_unmanaged_data_disk(cmd, 'rg1', 'vm1', True, faked_vhd_uri2, None, 'd2', 512, CachingTypes.read_write)
# assert
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm)
self.assertEqual(len(vm.storage_profile.data_disks), 2)
data_disk = vm.storage_profile.data_disks[1]
self.assertEqual(CachingTypes.read_write, data_disk.caching)
self.assertEqual(DiskCreateOptionTypes.empty, data_disk.create_option)
self.assertIsNone(data_disk.image)
        self.assertEqual(data_disk.lun, 0)  # the existing disk has lun '1', so this verifies the second one is picked as '0'
self.assertEqual(data_disk.vhd.uri, faked_vhd_uri2)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get):
# pylint: disable=line-too-long
        faked_vhd_uri = 'https://your_storage_account_name.blob.core.windows.net/vhds/d1.vhd'
# stub to get the vm which has no datadisks
vm = FakedVM()
cmd = _get_test_cmd()
mock_vm_get.return_value = vm
# execute
attach_unmanaged_data_disk(cmd, 'rg1', 'vm1', False, faked_vhd_uri, disk_name='d1', caching=CachingTypes.read_only)
# assert
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm)
self.assertEqual(len(vm.storage_profile.data_disks), 1)
data_disk = vm.storage_profile.data_disks[0]
self.assertEqual(CachingTypes.read_only, data_disk.caching)
self.assertEqual(DiskCreateOptionTypes.attach, data_disk.create_option)
self.assertIsNone(data_disk.image)
self.assertEqual(data_disk.lun, 0)
self.assertEqual(data_disk.name, 'd1')
self.assertEqual(data_disk.vhd.uri, faked_vhd_uri)
@mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
    def test_detach_disk_on_vm(self, mock_vm_set, mock_vm_get):
# pylint: disable=line-too-long
# stub to get the vm which has no datadisks
        faked_vhd_uri = 'https://your_storage_account_name.blob.core.windows.net/vhds/d1.vhd'
existing_disk = DataDisk(lun=1, vhd=faked_vhd_uri, name='d1', create_option=DiskCreateOptionTypes.empty)
vm = FakedVM(None, [existing_disk])
cmd = _get_test_cmd()
mock_vm_get.return_value = vm
# execute
detach_data_disk(cmd, 'rg1', 'vm1', 'd1')
# assert
self.assertTrue(mock_vm_get.called)
mock_vm_set.assert_called_once_with(cmd, vm)
self.assertEqual(len(vm.storage_profile.data_disks), 0)
@mock.patch('azure.cli.command_modules.vm.custom._compute_client_factory')
def test_show_vmss_instance_view(self, factory_mock):
vm_client = mock.MagicMock()
cmd = _get_test_cmd()
factory_mock.return_value = vm_client
# execute
get_vmss_instance_view(cmd, 'rg1', 'vmss1', '*')
# assert
vm_client.virtual_machine_scale_set_vms.list.assert_called_once_with('rg1', 'vmss1', expand='instanceView',
select='instanceView')
# pylint: disable=line-too-long
@mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.vm.disk_encryption._get_keyvault_key_url', autospec=True)
def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, mock_compute_client_factory):
faked_keyvault = '/subscriptions/01234567-1bf0-4dda-aec3-cb9272f09590/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/v1'
os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux)
existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', create_option=DiskCreateOptionTypes.empty)
vm = FakedVM(None, [existing_disk], os_disk=os_disk)
cmd = _get_test_cmd()
compute_client_mock = mock.MagicMock()
compute_client_mock.virtual_machines.get.return_value = vm
mock_compute_client_factory.return_value = compute_client_mock
mock_get_keyvault_key_url.return_value = 'https://somevaults.vault.azure.net/'
# throw when VM has disks, but no --volume-type is specified
with self.assertRaises(CLIError) as context:
encrypt_vm(cmd, 'rg1', 'vm1', 'client_id', faked_keyvault, 'client_secret')
self.assertTrue("supply --volume-type" in str(context.exception))
# throw when no AAD client secrets
with self.assertRaises(CLIError) as context:
encrypt_vm(cmd, 'rg1', 'vm1', 'client_id', faked_keyvault)
self.assertTrue("--aad-client-cert-thumbprint or --aad-client-secret" in str(context.exception))
@mock.patch('azure.cli.command_modules.vm.disk_encryption.set_vm', autospec=True)
@mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True)
def test_disable_encryption_error_cases_handling(self, mock_compute_client_factory, mock_vm_set): # pylint: disable=unused-argument
os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux)
existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', create_option=DiskCreateOptionTypes.empty)
vm = FakedVM(None, [existing_disk], os_disk=os_disk)
cmd = _get_test_cmd()
vm_extension = VirtualMachineExtension(location='westus',
settings={'SequenceVersion': 1},
instance_view=VirtualMachineExtensionInstanceView(
statuses=[InstanceViewStatus(message='Encryption completed successfully')],
substatuses=[InstanceViewStatus(message='{"os":"Encrypted"}')]))
vm_extension.provisioning_state = 'Succeeded'
compute_client_mock = mock.MagicMock()
compute_client_mock.virtual_machines.get.return_value = vm
compute_client_mock.virtual_machine_extensions.get.return_value = vm_extension
mock_compute_client_factory.return_value = compute_client_mock
# throw on disabling encryption on OS disk of a linux VM
with self.assertRaises(CLIError) as context:
decrypt_vm(cmd, 'rg1', 'vm1', 'OS')
self.assertTrue("Only Data disks can have encryption disabled in a Linux VM." in str(context.exception))
        # works fine to disable encryption on the data disk when the OS disk was never encrypted
vm_extension.instance_view.substatuses[0].message = '{}'
decrypt_vm(cmd, 'rg1', 'vm1', 'DATA')
def test_merge_secrets(self):
secret1 = [{
'sourceVault': {'id': '123'},
'vaultCertificates': [
{
'certificateUrl': 'abc',
'certificateStore': 'My'
}
]}]
secret2 = [{
'sourceVault': {'id': '123'},
'vaultCertificates': [
{
'certificateUrl': 'def',
'certificateStore': 'Machine'
},
{
'certificateUrl': 'xyz',
'certificateStore': 'My'
}
]}]
secret3 = [{
'sourceVault': {'id': '456'},
'vaultCertificates': [
{
'certificateUrl': 'abc',
'certificateStore': 'My'
}
]}]
merged = _merge_secrets([secret1, secret2, secret3])
self.assertIn('456', [item['sourceVault']['id'] for item in merged])
self.assertIn('123', [item['sourceVault']['id'] for item in merged])
vault123 = [item['vaultCertificates'] for item in merged
if item['sourceVault']['id'] == '123'][0]
vault123.sort(key=lambda x: x['certificateUrl'])
vault123Expected = [
{
'certificateUrl': 'abc',
'certificateStore': 'My'
},
{
'certificateUrl': 'def',
'certificateStore': 'Machine'
},
{
'certificateUrl': 'xyz',
'certificateStore': 'My'
}
]
vault123Expected.sort(key=lambda x: x['certificateUrl'])
self.assertListEqual(vault123Expected, vault123)
def test_get_extension_instance_name(self):
instance_view = mock.MagicMock()
extension = mock.MagicMock()
extension.type = 'publisher2.extension2'
instance_view.extensions = [extension]
# action
result = _get_extension_instance_name(instance_view, 'publisher1', 'extension1')
# assert
self.assertEqual(result, 'extension1')
def test_get_extension_instance_name_when_type_none(self):
instance_view = mock.MagicMock()
extension = mock.MagicMock()
extension.type = None
instance_view.extensions = [extension]
# action
result = _get_extension_instance_name(instance_view, 'na', 'extension-name')
# assert
self.assertEqual(result, 'extension-name')
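# Hedged sketch (not the CLI's implementation, which lives in
# azure.cli.command_modules.vm.custom): a minimal version of the behaviour that
# test_merge_secrets above asserts -- group secret entries by sourceVault id and
# concatenate their vaultCertificates.
def _merge_secrets_sketch(secret_groups):
    merged = {}
    for group in secret_groups:
        for secret in group:
            vault_id = secret['sourceVault']['id']
            merged.setdefault(vault_id, []).extend(secret['vaultCertificates'])
    return [{'sourceVault': {'id': vault_id}, 'vaultCertificates': certs}
            for vault_id, certs in merged.items()]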
class TestVMBootLog(unittest.TestCase):
@mock.patch('azure.cli.command_modules.vm.custom.logger.warning')
def test_vm_boot_log_handle_unicode(self, logger_warning__mock):
import sys
writer = BootLogStreamWriter(sys.stdout)
writer.write('hello')
        writer.write(u'\u54c8')  # an arbitrary unicode character that could trip up the default output encoding
# we are good once we are here
@mock.patch('azure.cli.core.profiles.get_sdk', autospec=True)
def test_vm_boot_log_init_storage_sdk(self, get_sdk_mock):
class ErrorToExitCommandEarly(Exception):
pass
cmd_mock = mock.MagicMock()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
get_sdk_mock.side_effect = ErrorToExitCommandEarly()
try:
get_boot_log(cmd_mock, 'rg1', 'vm1')
self.fail("'get_boot_log' didn't exit early")
except ErrorToExitCommandEarly:
get_sdk_mock.assert_called_with(cli_ctx_mock, ResourceType.DATA_STORAGE, 'blob.blockblobservice#BlockBlobService')
class FakedVM(object): # pylint: disable=too-few-public-methods
def __init__(self, nics=None, disks=None, os_disk=None):
self.network_profile = NetworkProfile(network_interfaces=nics)
self.storage_profile = StorageProfile(data_disks=disks, os_disk=os_disk)
self.location = 'westus'
ext = mock.MagicMock()
ext.publisher, ext.virtual_machine_extension_type = 'Microsoft.Azure.Security', 'AzureDiskEncryptionForLinux'
self.resources = [ext]
self.instance_view = mock.MagicMock()
self.instance_view.extensions = [ext]
class FakedAccessExtensionEntity(object): # pylint: disable=too-few-public-methods
def __init__(self, is_linux, version):
self.name = 'VMAccessForLinux' if is_linux else 'VMAccessAgent'
self.type_handler_version = version
if __name__ == '__main__':
unittest.main()
| 46.785377
| 136
| 0.662701
|
ae6ced6732bc1f6208c5feb296196b57f7960839
| 7,777
|
py
|
Python
|
Ros2_Project2_DWA_Pathplanner/dwa_pathplanning_turtlesim/dwa_pathplanning_turtlesim/dynamic_window_approach.py
|
Gonnnnn/Ros_Project
|
b9463855cee1df6673ca721a8792447f1230e978
|
[
"Apache-2.0"
] | 2
|
2022-01-09T19:54:08.000Z
|
2022-01-09T19:54:10.000Z
|
Ros2_Project2_DWA_Pathplanner/dwa_pathplanning_turtlesim/dwa_pathplanning_turtlesim/dynamic_window_approach.py
|
Gonnnnn/Ros_Project
|
b9463855cee1df6673ca721a8792447f1230e978
|
[
"Apache-2.0"
] | null | null | null |
Ros2_Project2_DWA_Pathplanner/dwa_pathplanning_turtlesim/dwa_pathplanning_turtlesim/dynamic_window_approach.py
|
Gonnnnn/Ros_Project
|
b9463855cee1df6673ca721a8792447f1230e978
|
[
"Apache-2.0"
] | null | null | null |
"""
Mobile robot motion planning sample with Dynamic Window Approach
author: Atsushi Sakai (@Atsushi_twi), Göktuğ Karakaşlı
"""
import math
from enum import Enum
import numpy as np
def dwa_control(x, config, goal, ob):
"""
Dynamic Window Approach control
"""
dw = calc_dynamic_window(x, config)
u, trajectory = calc_control_and_trajectory(x, dw, config, goal, ob)
return u, trajectory
class RobotType(Enum):
circle = 0
rectangle = 1
class Config:
"""
simulation parameter class
"""
def __init__(self):
# robot parameter
self.max_speed = 1.0 # [m/s]
self.min_speed = -0.5 # [m/s]
self.max_yaw_rate = 40.0 * math.pi / 180.0 # [rad/s]
self.max_accel = 0.2 # [m/ss]
self.max_delta_yaw_rate = 40.0 * math.pi / 180.0 # [rad/ss]
self.v_resolution = 0.01 # [m/s]
self.yaw_rate_resolution = 0.1 * math.pi / 180.0 # [rad/s]
self.dt = 0.1 # [s] Time tick for motion prediction
self.predict_time = 3.0 # [s]
self.to_goal_cost_gain = 0.15
self.speed_cost_gain = 1.0
self.obstacle_cost_gain = 1.0
        self.robot_stuck_flag_cons = 0.001  # constant to prevent the robot from getting stuck
self.robot_type = RobotType.circle
# if robot_type == RobotType.circle
# Also used to check if goal is reached in both types
self.robot_radius = 1.0 # [m] for collision check
# if robot_type == RobotType.rectangle
self.robot_width = 0.5 # [m] for collision check
self.robot_length = 1.2 # [m] for collision check
# obstacles [x(m) y(m), ....]
self.ob = np.array([[-1, -1],
[0, 2],
[4.0, 2.0],
[5.0, 4.0],
[5.0, 5.0],
[5.0, 6.0],
[5.0, 9.0],
[8.0, 9.0],
[7.0, 9.0],
[8.0, 10.0],
[9.0, 11.0],
[12.0, 13.0],
[12.0, 12.0],
[15.0, 15.0],
[13.0, 13.0]
])
@property
def robot_type(self):
return self._robot_type
@robot_type.setter
def robot_type(self, value):
if not isinstance(value, RobotType):
raise TypeError("robot_type must be an instance of RobotType")
self._robot_type = value
config = Config()
def motion(x, u, dt):
"""
motion model
"""
x[2] += u[1] * dt
x[0] += u[0] * math.cos(x[2]) * dt
x[1] += u[0] * math.sin(x[2]) * dt
x[3] = u[0]
x[4] = u[1]
return x
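# Hedged sketch (not in the original script): one integration step of the
# unicycle motion model above. Driving straight at 1.0 m/s from the origin for
# dt = 0.1 s moves the robot to roughly x = 0.1 m, y = 0.0 m.
def _motion_step_example():
    state = np.array([0.0, 0.0, 0.0, 0.0, 0.0])  # [x, y, yaw, v, omega]
    return motion(state, [1.0, 0.0], 0.1)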
def calc_dynamic_window(x, config):
"""
    calculate the dynamic window based on the current state x
"""
# Dynamic window from robot specification
Vs = [config.min_speed, config.max_speed,
-config.max_yaw_rate, config.max_yaw_rate]
# Dynamic window from motion model
Vd = [x[3] - config.max_accel * config.dt,
x[3] + config.max_accel * config.dt,
x[4] - config.max_delta_yaw_rate * config.dt,
x[4] + config.max_delta_yaw_rate * config.dt]
# [v_min, v_max, yaw_rate_min, yaw_rate_max]
dw = [max(Vs[0], Vd[0]), min(Vs[1], Vd[1]),
max(Vs[2], Vd[2]), min(Vs[3], Vd[3])]
return dw
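# Hedged sketch (not in the original script): with the Config defaults above, a
# robot currently at v = 0.5 m/s and omega = 0 rad/s can only reach speeds in
# [0.5 - 0.2 * 0.1, 0.5 + 0.2 * 0.1] = [0.48, 0.52] m/s and yaw rates of about
# +/- 0.07 rad/s within one dt, so the returned window is that reachable set
# clipped to the specification limits Vs.
def _dynamic_window_example():
    state = np.array([0.0, 0.0, 0.0, 0.5, 0.0])  # [x, y, yaw, v, omega]
    return calc_dynamic_window(state, config)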
def predict_trajectory(x_init, v, y, config):
"""
predict trajectory with an input
"""
x = np.array(x_init)
trajectory = np.array(x)
time = 0
while time <= config.predict_time:
x = motion(x, [v, y], config.dt)
trajectory = np.vstack((trajectory, x))
time += config.dt
return trajectory
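# Hedged sketch (not in the original script): rolling the motion model forward
# for predict_time seconds at a fixed (v, yaw_rate) sample yields roughly
# predict_time / dt simulated states stacked on top of the initial state.
def _predict_trajectory_example():
    start = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
    return predict_trajectory(start, 0.5, 0.0, config)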
def calc_control_and_trajectory(x, dw, config, goal, ob):
"""
    calculate the final control input within the dynamic window
"""
x_init = x[:]
min_cost = float("inf")
best_u = [0.0, 0.0]
best_trajectory = np.array([x])
# evaluate all trajectory with sampled input in dynamic window
for v in np.arange(dw[0], dw[1], config.v_resolution):
for y in np.arange(dw[2], dw[3], config.yaw_rate_resolution):
trajectory = predict_trajectory(x_init, v, y, config)
# calc cost
to_goal_cost = config.to_goal_cost_gain * calc_to_goal_cost(trajectory, goal)
speed_cost = config.speed_cost_gain * (config.max_speed - trajectory[-1, 3])
ob_cost = config.obstacle_cost_gain * calc_obstacle_cost(trajectory, ob, config)
final_cost = to_goal_cost + speed_cost + ob_cost
# search minimum trajectory
if min_cost >= final_cost:
min_cost = final_cost
best_u = [v, y]
best_trajectory = trajectory
if abs(best_u[0]) < config.robot_stuck_flag_cons \
and abs(x[3]) < config.robot_stuck_flag_cons:
                    # to ensure the robot does not get stuck with
                    # best v=0 m/s (in front of an obstacle) and
                    # best omega=0 rad/s (heading to the goal with
                    # an angle difference of 0)
best_u[1] = -config.max_delta_yaw_rate
return best_u, best_trajectory
def calc_obstacle_cost(trajectory, ob, config):
"""
    calculate the obstacle cost; returns inf on collision
"""
ox = ob[:, 0]
oy = ob[:, 1]
dx = trajectory[:, 0] - ox[:, None]
dy = trajectory[:, 1] - oy[:, None]
r = np.hypot(dx, dy)
if config.robot_type == RobotType.rectangle:
yaw = trajectory[:, 2]
rot = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])
rot = np.transpose(rot, [2, 0, 1])
local_ob = ob[:, None] - trajectory[:, 0:2]
local_ob = local_ob.reshape(-1, local_ob.shape[-1])
local_ob = np.array([local_ob @ x for x in rot])
local_ob = local_ob.reshape(-1, local_ob.shape[-1])
upper_check = local_ob[:, 0] <= config.robot_length / 2
right_check = local_ob[:, 1] <= config.robot_width / 2
bottom_check = local_ob[:, 0] >= -config.robot_length / 2
left_check = local_ob[:, 1] >= -config.robot_width / 2
if (np.logical_and(np.logical_and(upper_check, right_check),
np.logical_and(bottom_check, left_check))).any():
return float("Inf")
elif config.robot_type == RobotType.circle:
if np.array(r <= config.robot_radius).any():
return float("Inf")
min_r = np.min(r)
return 1.0 / min_r # OK
def calc_to_goal_cost(trajectory, goal):
"""
calc to goal cost with angle difference
"""
dx = goal[0] - trajectory[-1, 0]
dy = goal[1] - trajectory[-1, 1]
error_angle = math.atan2(dy, dx)
cost_angle = error_angle - trajectory[-1, 2]
cost = abs(math.atan2(math.sin(cost_angle), math.cos(cost_angle)))
return cost
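# Hedged sketch (not in the original script): the atan2(sin, cos) trick used
# above wraps any angle difference into (-pi, pi], e.g. 3*pi/2 wraps to -pi/2,
# so the heading cost never exceeds pi.
def _angle_wrap_example(angle):
    return math.atan2(math.sin(angle), math.cos(angle))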
def main(gx=10.0, gy=10.0, robot_type=RobotType.circle):
print(__file__ + " start!!")
# initial state [x(m), y(m), yaw(rad), v(m/s), omega(rad/s)]
x = np.array([0.0, 0.0, math.pi / 8.0, 0.0, 0.0])
# goal position [x(m), y(m)]
goal = np.array([gx, gy])
# input [forward speed, yaw_rate]
config.robot_type = robot_type
ob = config.ob
while True:
u, predicted_trajectory = dwa_control(x, config, goal, ob)
x = motion(x, u, config.dt) # simulate robot
# check reaching goal
dist_to_goal = math.hypot(x[0] - goal[0], x[1] - goal[1])
if dist_to_goal <= config.robot_radius:
print("Goal!!")
break
if __name__ == '__main__':
main(robot_type=RobotType.rectangle)
# main(robot_type=RobotType.circle)
| 31.108
| 92
| 0.551241
|
5f885471af17e26bfdbe7b43019854475fdf85a7
| 16,499
|
py
|
Python
|
python/tvm/relay/qnn/op/legalizations.py
|
cli99/tvm
|
6c6e873a1325a32418108daad6e38f3df8c37660
|
[
"Apache-2.0"
] | 1
|
2022-02-20T11:35:19.000Z
|
2022-02-20T11:35:19.000Z
|
python/tvm/relay/qnn/op/legalizations.py
|
cli99/tvm
|
6c6e873a1325a32418108daad6e38f3df8c37660
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/qnn/op/legalizations.py
|
cli99/tvm
|
6c6e873a1325a32418108daad6e38f3df8c37660
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend QNN related feature registration"""
import numpy as np
import tvm
from scipy import special
from tvm import relay
from tvm._ffi.base import TVMError
from tvm.relay.qnn.op.canonicalizations import create_integer_lookup_op
from ....topi.x86.utils import target_has_sse42
from .. import op as reg
#################################################
# Register the functions for different operators.
#################################################
# Registering QNN Conv2D legalization function.
@reg.register_qnn_legalize("qnn.conv2d")
def legalize_qnn_conv2d(attrs, inputs, types):
return qnn_conv2d_legalize(attrs, inputs, types)
# Registering QNN Conv2DTranspose legalization function.
@reg.register_qnn_legalize("qnn.conv2d_transpose")
def legalize_qnn_conv2d_transpose(attrs, inputs, types):
return qnn_conv2d_transpose_legalize(attrs, inputs, types)
# Registering QNN dense legalization function.
@reg.register_qnn_legalize("qnn.dense")
def legalize_qnn_dense(attrs, inputs, types):
return qnn_dense_legalize(attrs, inputs, types)
def register_qnn_unary_op_legalize(op_name, floating_point_func):
"""Register unary qnn op for legalization via table lookup op."""
def legalize_qnn_unary_op(attrs, inputs, types):
return create_integer_lookup_op(
input_arg=inputs[0],
floating_point_func=floating_point_func,
in_scale=inputs[1],
in_zero_point=inputs[2],
out_scale=inputs[3],
out_zero_point=inputs[4],
in_dtype=types[0].dtype,
out_dtype=types[0].dtype,
)
return reg.register_qnn_legalize(op_name, legalize_qnn_unary_op)
register_qnn_unary_op_legalize("qnn.sqrt", np.sqrt)
register_qnn_unary_op_legalize("qnn.rsqrt", lambda arr: 1 / np.sqrt(arr))
register_qnn_unary_op_legalize("qnn.exp", np.exp)
register_qnn_unary_op_legalize("qnn.erf", special.erf)
register_qnn_unary_op_legalize("qnn.sigmoid", lambda arr: 1 / (1 + np.exp(-arr)))
register_qnn_unary_op_legalize("qnn.tanh", np.tanh)
# Default to None. If overridden by target, this will not be run.
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_conv2d_legalize(attrs, inputs, types):
"""Default legalization is None."""
return None
# Generic QNN Conv2DTranspose legalization function.
@tvm.target.generic_func
def qnn_conv2d_transpose_legalize(attrs, inputs, types):
"""Convert kernel and data to int16, subtract offsets upfront
and calls into relay.nn.conv2d_transpose."""
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
shift_data = relay.subtract(
relay.cast(data, dtype="int16"), relay.cast(input_zero_point, "int16")
)
shift_kernel = relay.subtract(
relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, "int16")
)
return relay.nn.conv2d_transpose(shift_data, shift_kernel, **attrs)
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_dense_legalize(attrs, inputs, types):
"""Default legalization is None."""
return None
###################
# Helper functions.
###################
def get_scalar_from_constant(expr):
"""Returns scalar value from Relay constant scalar."""
assert (
isinstance(expr, relay.Constant) and not expr.data.shape
), "Expr is not a constant scalar."
value = expr.data.numpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return value.item(0)
def _shift(data, zero_point, out_dtype):
"""Shifts (add/subtracts) the qnn tensor with +/-128)"""
if out_dtype == "uint8":
shift = 128
elif out_dtype == "int8":
shift = -128
else:
raise ValueError("Unsupported out dtype.")
data_modified = relay.cast(data, "int32")
data_modified = relay.add(data_modified, relay.const(shift, "int32"))
data_modified = relay.cast(data_modified, out_dtype)
if isinstance(zero_point, relay.Constant):
zero_point_val = get_scalar_from_constant(zero_point)
zero_point_modified = relay.const(zero_point_val + shift, "int32")
else:
zero_point_modified = zero_point + relay.const(shift, "int32")
return (data_modified, zero_point_modified)
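# Hedged numeric sketch (not used by the legalization itself): shifting an int8
# quantized value and its zero point by +128 preserves the represented real
# value, since scale * ((q + 128) - (zp + 128)) == scale * (q - zp). The sample
# numbers below are illustrative only.
def _shift_example():
    q, zp, scale = -3, 0, 0.05
    q_u8, zp_u8 = q + 128, zp + 128
    assert abs(scale * (q - zp) - scale * (q_u8 - zp_u8)) < 1e-9
    return q_u8, zp_u8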
# Helper function for lowering in the absence of fast Int8 arithmetic units.
def helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay_op):
"""Converts QNN operators into a sequence of Relay operators that are friendly to HW that do
not have fast Int8 arithmetic. For example, for ARM, LLVM utilizes the assembly instructions
much more efficiently if the convolution or dense operator input datatypes are int16 instead of
int8. More details are present at https://github.com/apache/tvm/pull/4277.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
shift_data = relay.subtract(
relay.cast(data, dtype="int16"), relay.cast(input_zero_point, dtype="int16")
)
# If kernel zero point is a scalar we can directly subtract it.
if len(types[3].shape) == 0:
shift_kernel = relay.subtract(
relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, dtype="int16")
)
# Otherwise it needs to be broadcast.
else:
# Determine output axis of kernel for spatial operations.
if hasattr(attrs, "kernel_layout"):
output_axis = tvm.tir.layout(attrs["kernel_layout"]).index_of("O")
# For dense operations, broadcast to [N, K] layout.
elif isinstance(attrs, relay.op.op_attrs.DenseAttrs):
output_axis = 0
# For matrix multiplication instead expand to [K, N] layout.
elif isinstance(attrs, relay.op.op_attrs.MatmulAttrs):
output_axis = 1
else:
raise TVMError(
"Legalization of %s is not yet supported with per channel parameters"
% str(type(attrs))
)
shift_kernel = relay.nn.bias_add(
relay.cast(kernel, dtype="int16"),
relay.cast(kernel_zero_point, dtype="int16"),
output_axis,
)
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(shift_data, shift_kernel, **new_attrs)
# Helper function to change dtypes to uint8 x int8. Intel VNNI instructions prefer this setting.
def helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay_op):
"""Legalizes QNN conv2d/dense op for Intel HW. VNNI supports u8 x i8 fast conv/MM. If the dtypes
    are already good, we don't transform. Else, we shift the tensor values and zero points to change
the dtype.
    Converting from int8 to uint8 can be done in the following manner.
Original equation
scale * (QA - zp_a)
scale * (QA + 128 - 128 - zp_a)
scale * ( (QA + 128) - (zp_a + 128))
Replacing QA + 128 with QA' and (zp_a + 128) with zp_a'
We get our new quantized uint8 tensor - scale * (QA' - zp_a')
    Similarly we can convert from uint8 to int8.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
# VNNI supports u8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
if data_dtype == "uint8" and kernel_dtype == "int8":
return None
# Shift input if necessary.
if data_dtype == "int8":
# Compute (QA + 128) and (zp_a + 128)
data, input_zero_point = _shift(data, input_zero_point, "uint8")
# Shift kernel if necessary.
if kernel_dtype == "uint8":
# Compute (QA - 128) and (zp_a - 128)
kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
# Call qnn.conv2d with modified inputs and zero points.
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
# Helper function to change dtypes to int8 x int8. Cuda dp4a instructions prefer this setting.
def helper_change_dtypes_to_int8(attrs, inputs, types, relay_op):
"""Legalizes QNN conv2d/dense op for Nvidia HW. dp4a supports i8 x i8 fast conv/MM. If the
    dtypes are already good, we don't transform. Else, we shift the tensor values and zero points
to change the dtype.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
# dp4a supports i8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
if data_dtype == "int8" and kernel_dtype == "int8":
return None
# Shift input if necessary.
if data_dtype == "uint8":
# Compute (QA + 128) and (zp_a + 128)
data, input_zero_point = _shift(data, input_zero_point, "int8")
# Shift kernel if necessary.
if kernel_dtype == "uint8":
# Compute (QA - 128) and (zp_a - 128)
kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
# Call qnn.conv2d with modified inputs and zero points.
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
# Helper function to change dtypes to be same. ARM dotprod instructions prefer this setting.
def helper_change_dtypes_to_be_same(attrs, inputs, types, relay_op):
"""Sometimes MxNet + MLDNN can lead to uint8 x int8 datatypes for the conv inputs. However,
many devices like ARM prefer the datatypes to be same for the HW units. This helper transforms
conv2d/dense such that both the dtypes are same.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
def _shift(data, zero_point, out_dtype):
"""Shifts (adds/subtracts) the qnn tensor by 128)"""
if out_dtype == "uint8":
shift = 128
elif out_dtype == "int8":
shift = -128
else:
raise ValueError("Unsupported out dtype.")
data_modified = relay.cast(data, "int32")
data_modified = relay.add(data_modified, relay.const(shift, "int32"))
data_modified = relay.cast(data_modified, out_dtype)
zero_point_val = get_scalar_from_constant(zero_point)
zero_point_modified = relay.const(zero_point_val + shift, "int32")
return (data_modified, zero_point_modified)
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
if data_dtype == kernel_dtype:
return None
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
assert (
"int8" in data_dtype and "int8" in kernel_dtype
), "Qnn Conv2D/Dense only accepts uint8 or int8 inputs"
# Shift input if necessary.
data, input_zero_point = _shift(data, input_zero_point, kernel_dtype)
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
def is_fast_int8_on_intel():
"""Checks whether the hardware has support for fast Int8 arithmetic operations."""
target = tvm.target.Target.current(allow_none=False)
return target_has_sse42(target.mcpu)
def is_fast_int8_on_arm():
"""Checks whether the hardware has support for fast Int8 arithmetic operations."""
target = tvm.target.Target.current(allow_none=False)
return "+v8.2a" in target.mattr and "+dotprod" in target.mattr
def is_aarch64_arm():
"""Checks whether we are compiling for an AArch64 target."""
target = tvm.target.Target.current(allow_none=False)
return "aarch64" in target.attrs.get("mtriple", "")
########################
# ARM CPU legalizations.
########################
@qnn_conv2d_legalize.register("arm_cpu")
def _qnn_conv2d_legalize_arm_cpu(attrs, inputs, types):
# ARM prefers the dtypes to be same.
is_depthwise = relay.op.strategy.is_depthwise_conv2d(
types[0].shape,
attrs["data_layout"],
types[1].shape,
attrs["kernel_layout"],
attrs["groups"],
)
use_int8_on_arm = (not is_depthwise) and is_aarch64_arm() and attrs["data_layout"] == "NHWC"
if use_int8_on_arm or is_fast_int8_on_arm():
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.conv2d)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
@qnn_dense_legalize.register("arm_cpu")
def _qnn_dense_legalize_arm_cpu(attrs, inputs, types):
# ARM prefers the dtypes to be same.
if is_fast_int8_on_arm():
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.dense)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
##########################
# Intel CPU legalizations.
##########################
@qnn_conv2d_legalize.register("cpu")
def _qnn_conv2d_legalize_intel_cpu(attrs, inputs, types):
# The VNNI transformations prefer uint8 x int8 datatypes.
if is_fast_int8_on_intel():
return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.conv2d)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
@qnn_dense_legalize.register("cpu")
def _qnn_dense_legalize_intel_cpu(attrs, inputs, types):
# The VNNI transformations prefer uint8 x int8 datatypes.
if is_fast_int8_on_intel():
return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.dense)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
#####################
# CUDA legalizations.
#####################
@qnn_conv2d_legalize.register("cuda")
def _qnn_conv2d_legalize_cuda(attrs, inputs, types):
# CUDA prefers both datatypes to be int8.
return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.conv2d)
@qnn_dense_legalize.register("cuda")
def _qnn_dense_legalize_cuda(attrs, inputs, types):
# CUDA prefers both datatypes to be the int8.
return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.dense)
| 36.261538
| 100
| 0.68683
|
4dce8df74a2e4b3dc7f5a33bdbd794cee7d8007e
| 2,710
|
py
|
Python
|
clinica/pipelines/pet_linear/pet_linear_cli.py
|
NicolasGensollen/clinica
|
a17e1a05d7f8daf97f70de883ed8acc8714290c3
|
[
"MIT"
] | null | null | null |
clinica/pipelines/pet_linear/pet_linear_cli.py
|
NicolasGensollen/clinica
|
a17e1a05d7f8daf97f70de883ed8acc8714290c3
|
[
"MIT"
] | null | null | null |
clinica/pipelines/pet_linear/pet_linear_cli.py
|
NicolasGensollen/clinica
|
a17e1a05d7f8daf97f70de883ed8acc8714290c3
|
[
"MIT"
] | null | null | null |
from typing import Optional
import click
from clinica.pipelines import cli_param
pipeline_name = "pet-linear"
@click.command(name=pipeline_name)
@cli_param.argument.bids_directory
@cli_param.argument.caps_directory
@cli_param.argument.acq_label
@cli_param.argument.suvr_reference_region
@cli_param.option_group.pipeline_specific_options
@cli_param.option_group.option(
"-ui",
"--uncropped_image",
is_flag=True,
help="Do not crop the image with template (cropped image are suggested for using with DL models)",
)
@cli_param.option_group.option(
"--save_pet_in_t1w_space",
is_flag=True,
help="Save the PET image in the T1w space computed in the intermediate step of the pipeline",
)
@cli_param.option_group.common_pipelines_options
@cli_param.option.subjects_sessions_tsv
@cli_param.option.working_directory
@cli_param.option.n_procs
def cli(
bids_directory: str,
caps_directory: str,
acq_label: str,
suvr_reference_region: str,
uncropped_image: bool = False,
save_pet_in_t1w_space: bool = False,
subjects_sessions_tsv: Optional[str] = None,
working_directory: Optional[str] = None,
n_procs: Optional[int] = None,
) -> None:
"""Affine registration of PET images to the MNI standard space.
    ACQ_LABEL corresponds to the label given to the PET acquisition, specifying the tracer used.
Frequently used values are '18FFDG' or '18FAV45'.
The reference region must be specified to perform intensity normalization.
Accepted values include: 'pons', 'cerebellumPons', 'pons2', 'cerebellumPons2'.
Prerequisite: You need to have performed the t1-linear pipeline on your T1-weighted MR images.
    See https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/PET_Linear/
"""
from networkx import Graph
from clinica.utils.ux import print_end_pipeline
from .pet_linear_pipeline import PETLinear
parameters = {
"acq_label": acq_label,
"suvr_reference_region": suvr_reference_region,
"uncropped_image": uncropped_image,
"save_PETinT1w": save_pet_in_t1w_space,
}
pipeline = PETLinear(
bids_directory=bids_directory,
caps_directory=caps_directory,
tsv_file=subjects_sessions_tsv,
base_dir=working_directory,
parameters=parameters,
name=pipeline_name,
)
exec_pipeline = (
pipeline.run(plugin="MultiProc", plugin_args={"n_procs": n_procs})
if n_procs
else pipeline.run()
)
if isinstance(exec_pipeline, Graph):
print_end_pipeline(
pipeline_name, pipeline.base_dir, pipeline.base_dir_was_specified
)
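# Hedged sketch (not part of the pipeline module): the command above can be
# exercised in-process with click's test runner. The directory paths below are
# placeholders; the tracer and reference-region values are taken from the
# docstring above.
def _invoke_example():
    from click.testing import CliRunner

    runner = CliRunner()
    return runner.invoke(
        cli,
        ["/path/to/bids", "/path/to/caps", "18FFDG", "cerebellumPons", "--uncropped_image"],
    )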
if __name__ == "__main__":
cli()
| 30.111111
| 102
| 0.726199
|
600d79bfc5cd16a9ce1040e58ffb9263d7332ecd
| 1,313
|
py
|
Python
|
catalog/migrations/0004_auto_20191218_1421.py
|
konmacie/cookbook
|
3adaf1e4570190c8fbc3414542d80b398976cc92
|
[
"MIT"
] | null | null | null |
catalog/migrations/0004_auto_20191218_1421.py
|
konmacie/cookbook
|
3adaf1e4570190c8fbc3414542d80b398976cc92
|
[
"MIT"
] | null | null | null |
catalog/migrations/0004_auto_20191218_1421.py
|
konmacie/cookbook
|
3adaf1e4570190c8fbc3414542d80b398976cc92
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.7 on 2019-12-18 14:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0003_favourite'),
]
operations = [
migrations.AlterField(
model_name='recipe',
name='edit_date',
field=models.DateTimeField(auto_now=True),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateTimeField(auto_now_add=True)),
('text', models.CharField(max_length=250)),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='catalog.Recipe')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Comment',
'verbose_name_plural': 'Comments',
'ordering': ['-pub_date'],
},
),
]
| 35.486486
| 137
| 0.596344
|
b438f52029eb039b447b7e4c7b09b8887c0c3599
| 3,219
|
py
|
Python
|
tempest/stress/actions/volume_attach_delete.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
tempest/stress/actions/volume_attach_delete.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
tempest/stress/actions/volume_attach_delete.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
# (c) 2013 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
from tempest import config
import tempest.stress.stressaction as stressaction
CONF = config.CONF
class VolumeAttachDeleteTest(stressaction.StressAction):
def setUp(self, **kwargs):
self.image = CONF.compute.image_ref
self.flavor = CONF.compute.flavor_ref
def run(self):
# Step 1: create volume
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
resp, volume = self.manager.volumes_client.create_volume(
size=1,
display_name=name)
assert(resp.status == 200)
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'available')
self.logger.info("created volume: %s" % volume['id'])
# Step 2: create vm instance
vm_name = data_utils.rand_name("instance")
self.logger.info("creating vm: %s" % vm_name)
resp, server = self.manager.servers_client.create_server(
vm_name, self.image, self.flavor)
server_id = server['id']
assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.logger.info("created vm %s" % server_id)
# Step 3: attach volume to vm
self.logger.info("attach volume (%s) to vm %s" %
(volume['id'], server_id))
resp, body = self.manager.servers_client.attach_volume(server_id,
volume['id'],
'/dev/vdc')
assert(resp.status == 200)
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'in-use')
self.logger.info("volume (%s) attached to vm %s" %
(volume['id'], server_id))
# Step 4: delete vm
self.logger.info("deleting vm: %s" % vm_name)
resp, _ = self.manager.servers_client.delete_server(server_id)
assert(resp.status == 204)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted vm: %s" % server_id)
# Step 5: delete volume
self.logger.info("deleting volume: %s" % volume['id'])
resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
assert(resp.status == 202)
self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
self.logger.info("deleted volume: %s" % volume['id'])
| 43.5
| 79
| 0.607021
|
6b98c3584725afdcabd80df7d7a4b8662261ea6b
| 6,288
|
py
|
Python
|
suites/API/DatabaseApi/BlocksTransactions/GetRecentTransactionById.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 1
|
2021-03-12T05:17:02.000Z
|
2021-03-12T05:17:02.000Z
|
suites/API/DatabaseApi/BlocksTransactions/GetRecentTransactionById.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 1
|
2019-11-19T12:10:59.000Z
|
2019-11-19T12:10:59.000Z
|
suites/API/DatabaseApi/BlocksTransactions/GetRecentTransactionById.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 2
|
2019-04-29T10:46:48.000Z
|
2019-10-29T10:01:03.000Z
|
# -*- coding: utf-8 -*-
from time import strptime
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to, has_length, is_none, is_true, require_that
SUITE = {
"description": "Method 'get_recent_transaction_by_id'"
}
# todo: works only for pending transactions as of echo:0.19.0-rc.0
@lcc.disabled()
@lcc.prop("main", "type")
@lcc.tags("api", "database_api", "database_api_blocks_transactions", "get_recent_transaction_by_id")
@lcc.suite("Check work of method 'get_recent_transaction_by_id'", rank=1)
class GetRecentTransactionById(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.echo_acc0 = None
self.echo_acc1 = None
def compare_objects(self, first_field, second_field, key=None):
if isinstance(first_field, (list, dict)):
if isinstance(first_field, list) and len(first_field):
for key, elem in enumerate(first_field):
self.compare_objects(elem, second_field[key])
elif isinstance(first_field, dict) and len(first_field):
for key in list(first_field.keys()):
self.compare_objects(first_field[key], second_field[key], key)
else:
description = "list element"
if key:
description = "'{}'".format(key)
check_that("{}".format(description), first_field, equal_to(second_field), quiet=True)
@staticmethod
def compare_datetimes(first_time, second_time):
pattern = "%Y-%m-%dT%H:%M:%S"
return strptime(first_time, pattern) > strptime(second_time, pattern)
def get_last_block_time(self):
response_id = self.send_request(
self.get_request("get_dynamic_global_properties"), self.__database_api_identifier
)
return self.get_response(response_id)["result"]["time"]
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
lcc.log_info(
"API identifiers are: database='{}', registration='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc1 = self.get_account_id(
self.accounts[1], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info("Echo accounts are: #1='{}', #2='{}'".format(self.echo_acc0, self.echo_acc1))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'get_recent_transaction_by_id'")
def method_main_check(self):
lcc.set_step("Collect 'get_recent_transaction_by_id' operation")
transfer_operation = self.echo_ops.get_transfer_operation(
echo=self.echo, from_account_id=self.echo_acc0, to_account_id=self.echo_acc1
)
lcc.log_info("Transfer operation: '{}'".format(str(transfer_operation)))
lcc.set_step("Broadcast transaction that contains simple transfer operation to the ECHO network")
collected_operation = self.collect_operations(transfer_operation, self.__database_api_identifier)
expiration = self.get_expiration_time(1)
broadcast_result = self.echo_ops.broadcast(
echo=self.echo, list_operations=collected_operation, expiration=expiration, log_broadcast=False
)
require_that(
"broadcast transaction complete successfully",
self.is_operation_completed(broadcast_result, 0),
is_true(),
quiet=True
)
lcc.set_step("Get recent transaction by id (before it expire)")
params = [broadcast_result["id"]]
response_id = self.send_request(
self.get_request("get_recent_transaction_by_id", params), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.log_info("Call method 'get_recent_transaction_by_id' with transaction_id='{}' parameter".format(params))
lcc.set_step("Compare transaction objects (broadcast_result, 'get_recent_transaction_by_id' method)")
transaction_from_broadcast_result = broadcast_result["trx"]
transaction_from_api_method = response["result"]
require_that("'transaction from broadcast result'", transaction_from_broadcast_result, has_length(9))
require_that(
"'transaction from 'get_recent_transaction_by_id' method result'", transaction_from_api_method,
has_length(7)
)
self.compare_objects(transaction_from_api_method, transaction_from_broadcast_result)
lcc.set_step("Wait time for transaction expiration")
while True:
expiration_status = self.compare_datetimes(
self.get_datetime(global_datetime=True), transaction_from_broadcast_result["expiration"]
)
if expiration_status:
break
lcc.set_step("Get recent transaction by id (after it expire)")
while True:
last_block_time = self.get_last_block_time()
if self.compare_datetimes(last_block_time, expiration):
lcc.log_info("Call method 'get_recent_transaction_by_id' with transaction_id='{}'".format(params))
response_id = self.send_request(
self.get_request("get_recent_transaction_by_id", params), self.__database_api_identifier
)
response = self.get_response(response_id)
lcc.set_step("Check 'get_recent_transaction_by_id' method result for expired transaction")
require_that("'expired transaction result'", response["result"], is_none())
break
self.produce_block(self.__database_api_identifier)
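        # Note: the loop above keeps producing blocks until the head block time
        # passes the transaction expiration; only then is
        # 'get_recent_transaction_by_id' expected to return null for the
        # expired transaction.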
| 45.565217
| 116
| 0.676527
|
63524766f1d6a4930702354a5bfa45bcbd11d5ae
| 7,729
|
py
|
Python
|
qiskit/extensions/quantum_initializer/ucrot.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 3
|
2019-05-19T17:39:38.000Z
|
2020-01-28T19:59:18.000Z
|
qiskit/extensions/quantum_initializer/ucrot.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 4
|
2019-05-13T15:28:46.000Z
|
2019-12-19T20:47:02.000Z
|
qiskit/extensions/quantum_initializer/ucrot.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 1
|
2021-07-07T16:55:41.000Z
|
2021-07-07T16:55:41.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# The structure of the code is based on Emanuel Malvetti's semester thesis at ETH in 2018,
# which was supervised by Raban Iten and Prof. Renato Renner.
"""
(Abstract) base class for uniformly controlled (also called multiplexed) single-qubit rotations R_t.
This class provides a basis for the decomposition of uniformly controlled R_x,R_y and R_z gates
(i.e., for t=x,y,z). These gates can have several control qubits and a single target qubit.
If the k control qubits are in the state ket(i) (in the computational basis),
a single-qubit rotation R_t(a_i) is applied to the target qubit for a (real) angle a_i.
"""
import math
import numpy as np
from qiskit.circuit import Gate, QuantumCircuit
from qiskit.circuit.quantumcircuit import QuantumRegister
from qiskit.exceptions import QiskitError
_EPS = 1e-10 # global variable used to chop very small numbers to zero
class UCRot(Gate):
"""
Uniformly controlled rotations (also called multiplexed rotations).
The decomposition is based on 'Synthesis of Quantum Logic Circuits'
by Shende et al. (https://arxiv.org/pdf/quant-ph/0406176.pdf)
Input:
angle_list = list of (real) rotation angles [a_0,...,a_{2^k-1}]. Must have at least one entry.
rot_axis = rotation axis for the single qubit rotations
(currently, "X","Y" and "Z" are supported)
"""
def __init__(self, angle_list, rot_axis):
self.rot_axes = rot_axis
# Check if angle_list has type "list"
if not isinstance(angle_list, list):
raise QiskitError("The angles are not provided in a list.")
# Check if the angles in angle_list are real numbers
for angle in angle_list:
try:
float(angle)
except TypeError:
raise QiskitError(
"An angle cannot be converted to type float (real angles are expected).")
num_contr = math.log2(len(angle_list))
if num_contr < 0 or not num_contr.is_integer():
raise QiskitError(
"The number of controlled rotation gates is not a non-negative power of 2.")
if rot_axis not in ("X", "Y", "Z"):
raise QiskitError("Rotation axis is not supported.")
# Create new gate.
num_qubits = int(num_contr) + 1
super().__init__("ucrot" + rot_axis, num_qubits, angle_list)
def _define(self):
ucr_circuit = self._dec_ucrot()
gate_num = len(ucr_circuit.data)
gate = ucr_circuit.to_instruction()
q = QuantumRegister(self.num_qubits)
ucr_circuit = QuantumCircuit(q)
if gate_num == 0:
            # ToDo: if we did not add the identity here, it would lead to trouble
            # ToDo: when simulating the circuit afterwards.
            # This should probably be fixed in the behaviour of QuantumCircuit.
ucr_circuit.iden(q[0])
else:
ucr_circuit.append(gate, q[:])
self.definition = ucr_circuit.data
def _dec_ucrot(self):
"""
finds a decomposition of a UC rotation gate into elementary gates
(C-NOTs and single-qubit rotations).
"""
q = QuantumRegister(self.num_qubits)
circuit = QuantumCircuit(q)
q_target = q[0]
q_controls = q[1:]
if not q_controls: # equivalent to: if len(q_controls) == 0
if self.rot_axes == "X":
if np.abs(self.params[0]) > _EPS:
circuit.rx(self.params[0], q_target)
if self.rot_axes == "Y":
if np.abs(self.params[0]) > _EPS:
circuit.ry(self.params[0], q_target)
if self.rot_axes == "Z":
if np.abs(self.params[0]) > _EPS:
circuit.rz(self.params[0], q_target)
else:
# First, we find the rotation angles of the single-qubit rotations acting
# on the target qubit
angles = self.params.copy()
UCRot._dec_uc_rotations(angles, 0, len(angles), False)
            # Now, it is easy to place the C-NOT gates to get back the full decomposition.
for (i, angle) in enumerate(angles):
if self.rot_axes == "X":
if np.abs(angle) > _EPS:
circuit.rx(angle, q_target)
if self.rot_axes == "Y":
if np.abs(angle) > _EPS:
circuit.ry(angle, q_target)
if self.rot_axes == "Z":
if np.abs(angle) > _EPS:
circuit.rz(angle, q_target)
# Determine the index of the qubit we want to control the C-NOT gate.
# Note that it corresponds
                # to the number of trailing zeros in the binary representation of i+1
if not i == len(angles) - 1:
binary_rep = np.binary_repr(i + 1)
q_contr_index = len(binary_rep) - len(binary_rep.rstrip('0'))
else:
# Handle special case:
q_contr_index = len(q_controls) - 1
# For X rotations, we have to additionally place some Ry gates around the
# C-NOT gates. They change the basis of the NOT operation, such that the
                # decomposition for uniformly controlled X rotations works correctly by symmetry
# with the decomposition of uniformly controlled Z or Y rotations
if self.rot_axes == "X":
circuit.ry(np.pi / 2, q_target)
circuit.cx(q_controls[q_contr_index], q_target)
if self.rot_axes == "X":
circuit.ry(-np.pi / 2, q_target)
return circuit
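        # Note on the decomposition above: for k control qubits it emits one
        # single-qubit rotation per angle (2**k in total, skipping angles below
        # _EPS) interleaved with 2**k C-NOTs, whose control index follows the
        # number of trailing zeros of i+1 (with a special case for the last angle).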
@staticmethod
def _dec_uc_rotations(angles, start_index, end_index, reversedDec):
"""
Calculates rotation angles for a uniformly controlled R_t gate with a C-NOT gate at
the end of the circuit. The rotation angles of the gate R_t are stored in
        angles[start_index:end_index]. If reversedDec == True, it decomposes the gate such that
there is a C-NOT gate at the start of the circuit (in fact, the circuit topology for
the reversed decomposition is the reversed one of the original decomposition)
"""
interval_len_half = (end_index - start_index) // 2
for i in range(start_index, start_index + interval_len_half):
if not reversedDec:
angles[i], angles[i + interval_len_half] = UCRot._update_angles(angles[i], angles[
i + interval_len_half])
else:
angles[i + interval_len_half], angles[i] = UCRot._update_angles(angles[i], angles[
i + interval_len_half])
if interval_len_half <= 1:
return
else:
UCRot._dec_uc_rotations(angles, start_index, start_index + interval_len_half, False)
UCRot._dec_uc_rotations(angles, start_index + interval_len_half, end_index, True)
@staticmethod
def _update_angles(angle1, angle2):
"""Calculate the new rotation angles according to Shende's decomposition"""
return (angle1 + angle2) / 2.0, (angle1 - angle2) / 2.0
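        # For reference: with t1 = (angle1 + angle2) / 2 and t2 = (angle1 - angle2) / 2,
        # the original angles are recovered as angle1 = t1 + t2 and angle2 = t1 - t2.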
| 45.733728
| 100
| 0.614569
|
5983838a7484d098dcbc95680a5015628fbd744a
| 11,847
|
py
|
Python
|
nonebot/adapters/_base.py
|
notnotype/nonebot2
|
4661adc564c854b88fdcd4db5e30762ad5c1051d
|
[
"MIT"
] | 2
|
2020-11-20T02:56:04.000Z
|
2020-12-30T12:40:47.000Z
|
nonebot/adapters/_base.py
|
anlen123/nonebot2
|
36d0628e4f6b4a3a81858600006aed16f647d94d
|
[
"MIT"
] | null | null | null |
nonebot/adapters/_base.py
|
anlen123/nonebot2
|
36d0628e4f6b4a3a81858600006aed16f647d94d
|
[
"MIT"
] | 1
|
2020-11-24T09:15:51.000Z
|
2020-11-24T09:15:51.000Z
|
"""
Protocol adapter base classes
=============================
Each protocol should subclass the base classes below and register its adapter with ``driver.register_adapter``.
"""
import abc
from copy import copy
from typing_extensions import Literal
from functools import reduce, partial
from dataclasses import dataclass, field
from typing import Any, Dict, Union, TypeVar, Mapping, Optional, Callable, Iterable, Iterator, Awaitable, TYPE_CHECKING
from pydantic import BaseModel
from nonebot.utils import DataclassEncoder
if TYPE_CHECKING:
from nonebot.config import Config
from nonebot.drivers import Driver, WebSocket
class Bot(abc.ABC):
"""
    Base class for bots. Handles incoming event reports and provides the API call interface.
    """
    driver: "Driver"
    """Driver object"""
    config: "Config"
    """Config object"""
@abc.abstractmethod
def __init__(self,
connection_type: str,
self_id: str,
*,
websocket: Optional["WebSocket"] = None):
"""
        :Parameters:
          * ``connection_type: str``: http or websocket
          * ``self_id: str``: bot ID
          * ``websocket: Optional[WebSocket]``: WebSocket connection object
"""
self.connection_type = connection_type
"""连接类型"""
self.self_id = self_id
"""机器人 ID"""
self.websocket = websocket
"""Websocket 连接对象"""
def __getattr__(self, name: str) -> Callable[..., Awaitable[Any]]:
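        # Any unknown attribute is treated as an API name, so that e.g.
        # ``bot.send_msg(...)`` is forwarded to ``bot.call_api("send_msg", ...)``.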
return partial(self.call_api, name)
@property
@abc.abstractmethod
def type(self) -> str:
"""Adapter 类型"""
raise NotImplementedError
@classmethod
def register(cls, driver: "Driver", config: "Config"):
"""
        :Description:
          The `register` method is called by `driver.register_adapter` and is used to initialize the related configuration.
"""
cls.driver = driver
cls.config = config
@classmethod
@abc.abstractmethod
async def check_permission(cls, driver: "Driver", connection_type: str,
headers: dict, body: Optional[dict]) -> str:
"""
        :Description:
          Checks whether a connection request is valid. If it is, returns the ``unique identifier`` of the connection (usually the bot ID); otherwise raises a ``RequestDenied`` exception.
        :Parameters:
          * ``driver: Driver``: Driver object
          * ``connection_type: str``: connection type
          * ``headers: dict``: request headers
          * ``body: Optional[dict]``: request payload; empty for WebSocket connections
        :Returns:
          - ``str``: unique identifier of the connection
        :Raises:
          - ``RequestDenied``: the request is invalid
"""
raise NotImplementedError
@abc.abstractmethod
async def handle_message(self, message: dict):
"""
        :Description:
          Handles an incoming report message by converting it into an ``Event`` and then calling ``nonebot.message.handle_event`` for further processing.
        :Parameters:
          * ``message: dict``: the received report message
"""
raise NotImplementedError
@abc.abstractmethod
async def call_api(self, api: str, **data):
"""
        :Description:
          Calls a bot API. The API can be invoked either through this function or directly as an attribute of the bot.
        :Parameters:
          * ``api: str``: API name
          * ``**data``: API data
        :Example:
.. code-block:: python
await bot.call_api("send_msg", message="hello world")
await bot.send_msg(message="hello world")
"""
raise NotImplementedError
@abc.abstractmethod
async def send(self, event: "Event",
message: Union[str, "Message", "MessageSegment"], **kwargs):
"""
        :Description:
          Calls the bot's basic message-sending interface.
        :Parameters:
          * ``event: Event``: the reported event
          * ``message: Union[str, Message, MessageSegment]``: the message to send
* ``**kwargs``
"""
raise NotImplementedError
T_Message = TypeVar("T_Message", bound="Message")
T_MessageSegment = TypeVar("T_MessageSegment", bound="MessageSegment")
@dataclass
class MessageSegment(abc.ABC, Mapping):
"""消息段基类"""
type: str
"""
    - Type: ``str``
    - Description: message segment type
"""
data: Dict[str, Any] = field(default_factory=lambda: {})
"""
    - Type: ``Dict[str, Union[str, list]]``
    - Description: message segment data
"""
@abc.abstractmethod
def __str__(self) -> str:
"""该消息段所代表的 str,在命令匹配部分使用"""
raise NotImplementedError
def __len__(self) -> int:
return len(str(self))
def __ne__(self: T_MessageSegment, other: T_MessageSegment) -> bool:
return not self == other
@abc.abstractmethod
def __add__(self: T_MessageSegment, other: Union[str, T_MessageSegment,
T_Message]) -> T_Message:
"""你需要在这里实现不同消息段的合并:
比如:
if isinstance(other, str):
...
elif isinstance(other, MessageSegment):
...
注意:需要返回一个新生成的对象
"""
raise NotImplementedError
@abc.abstractmethod
def __radd__(
self: T_MessageSegment, other: Union[str, dict, list, T_MessageSegment,
T_Message]) -> "T_Message":
"""你需要在这里实现不同消息段的合并:
比如:
if isinstance(other, str):
...
elif isinstance(other, MessageSegment):
...
注意:需要返回一个新生成的对象
"""
raise NotImplementedError
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
return setattr(self, key, value)
def __iter__(self):
yield from self.data.__iter__()
def __contains__(self, key: object) -> bool:
return key in self.data
def get(self, key: str, default=None):
return getattr(self, key, default)
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
def copy(self: T_MessageSegment) -> T_MessageSegment:
return copy(self)
@abc.abstractmethod
def is_text(self) -> bool:
raise NotImplementedError
class Message(list, abc.ABC):
"""消息数组"""
def __init__(self,
message: Union[str, None, Mapping, Iterable[Mapping],
T_MessageSegment, T_Message, Any] = None,
*args,
**kwargs):
"""
        :Parameters:
          * ``message: Union[str, list, dict, MessageSegment, Message, Any]``: message content
"""
super().__init__(*args, **kwargs)
if message is None:
return
elif isinstance(message, Message):
self.extend(message)
elif isinstance(message, MessageSegment):
self.append(message)
else:
self.extend(self._construct(message))
def __str__(self):
return ''.join((str(seg) for seg in self))
@classmethod
def __get_validators__(cls):
yield cls._validate
@classmethod
def _validate(cls, value):
return cls(value)
@staticmethod
@abc.abstractmethod
def _construct(
msg: Union[str, Mapping, Iterable[Mapping], Any]
) -> Iterable[T_MessageSegment]:
raise NotImplementedError
def __add__(self: T_Message, other: Union[str, T_MessageSegment,
T_Message]) -> T_Message:
result = self.__class__(self)
if isinstance(other, str):
result.extend(self._construct(other))
elif isinstance(other, MessageSegment):
result.append(other)
elif isinstance(other, Message):
result.extend(other)
return result
def __radd__(self: T_Message, other: Union[str, T_MessageSegment,
T_Message]) -> T_Message:
result = self.__class__(other)
return result.__add__(self)
def __iadd__(self: T_Message, other: Union[str, T_MessageSegment,
T_Message]) -> T_Message:
if isinstance(other, str):
self.extend(self._construct(other))
elif isinstance(other, MessageSegment):
self.append(other)
elif isinstance(other, Message):
self.extend(other)
return self
def append(self: T_Message, obj: Union[str, T_MessageSegment]) -> T_Message:
"""
        :Description:
          Appends a message segment to the end of the message array.
        :Parameters:
          * ``obj: Union[str, MessageSegment]``: the message segment to add
"""
if isinstance(obj, MessageSegment):
super().append(obj)
elif isinstance(obj, str):
self.extend(self._construct(obj))
else:
raise ValueError(f"Unexpected type: {type(obj)} {obj}")
return self
def extend(self: T_Message,
obj: Union[T_Message, Iterable[T_MessageSegment]]) -> T_Message:
"""
        :Description:
          Appends a message array or several message segments to the end of this message array.
        :Parameters:
          * ``obj: Union[Message, Iterable[MessageSegment]]``: the message array to add
"""
for segment in obj:
self.append(segment)
return self
def reduce(self: T_Message) -> None:
"""
        :Description:
          Reduces the message array, i.e. merges adjacent message segments according to the MessageSegment implementation.
"""
index = 0
while index < len(self):
if index > 0 and self[index -
1].is_text() and self[index].is_text():
self[index - 1] += self[index]
del self[index]
else:
index += 1
def extract_plain_text(self: T_Message) -> str:
"""
        :Description:
          Extracts the plain text contained in the message.
"""
def _concat(x: str, y: T_MessageSegment) -> str:
return f"{x} {y}" if y.is_text() else x
plain_text = reduce(_concat, self, "")
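        # When the message contains text segments, ``plain_text`` starts with a
        # separating space (e.g. " foo bar"); the leading character is stripped below.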
return plain_text[1:] if plain_text else plain_text
class Event(abc.ABC, BaseModel):
"""Event 基类。提供获取关键信息的方法,其余信息可直接获取。"""
class Config:
extra = "allow"
json_encoders = {Message: DataclassEncoder}
@abc.abstractmethod
def get_type(self) -> str:
"""
        :Description:
          Returns the event type, usually one of the four built-in NoneBot types.
        :Returns:
          * ``Literal["message", "notice", "request", "meta_event"]``
          * any other custom ``str``
"""
raise NotImplementedError
@abc.abstractmethod
def get_event_name(self) -> str:
"""
        :Description:
          Returns the name of the event.
        :Returns:
          * ``str``
"""
raise NotImplementedError
@abc.abstractmethod
def get_event_description(self) -> str:
"""
        :Description:
          Returns the event description, usually the concrete content of the event.
        :Returns:
          * ``str``
"""
raise NotImplementedError
def __str__(self) -> str:
return f"[{self.get_event_name()}]: {self.get_event_description()}"
def get_log_string(self) -> str:
"""
        :Description:
          Returns the log string for the event. You normally do not need to override this method; raise a ``NoLogException`` only if you want NoneBot to hide the log entry for this event.
        :Returns:
          * ``str``
        :Raises:
          - ``NoLogException``
"""
return f"[{self.get_event_name()}]: {self.get_event_description()}"
@abc.abstractmethod
def get_user_id(self) -> str:
"""
        :Description:
          Returns the id of the event subject, usually the user id.
        :Returns:
          * ``str``
"""
raise NotImplementedError
@abc.abstractmethod
def get_session_id(self) -> str:
"""
        :Description:
          Returns the session id, used to determine which session the current event belongs to; usually a combination of user id and group id.
        :Returns:
          * ``str``
"""
raise NotImplementedError
@abc.abstractmethod
def get_message(self) -> Message:
"""
        :Description:
          Returns the message content of the event.
        :Returns:
          * ``Message``
"""
raise NotImplementedError
def get_plaintext(self) -> str:
"""
        :Description:
          Returns the plain text of the message. Usually no override is needed; the default implementation uses ``get_message().extract_plain_text``.
        :Returns:
          * ``str``
"""
return self.get_message().extract_plain_text()
@abc.abstractmethod
def is_tome(self) -> bool:
"""
        :Description:
          Returns whether the event is related to the bot.
        :Returns:
          * ``bool``
"""
raise NotImplementedError
| 23.885081
| 119
| 0.534228
|
a6bd1e8d4c3c8eca63f00b4f95c807d2487834f7
| 587
|
py
|
Python
|
lego/apps/ical/constants.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 45
|
2017-10-24T12:09:06.000Z
|
2021-11-03T21:21:03.000Z
|
lego/apps/ical/constants.py
|
ollfkaih/lego
|
b15aacaf09efe90e7f984d25b0e7bddbe12647e8
|
[
"MIT"
] | 980
|
2017-10-24T12:29:07.000Z
|
2022-03-31T04:04:31.000Z
|
lego/apps/ical/constants.py
|
wahello/lego
|
a0b02f3abc997fe96326e9c9c05b49847170041b
|
[
"MIT"
] | 23
|
2018-04-11T16:34:22.000Z
|
2021-11-23T12:28:30.000Z
|
HISTORY_BACKWARDS_IN_DAYS = 10
REGISTRATION_EVENT_LENGTH_IN_MINUTES = 30
TYPE_PERSONAL = "Personal"
TYPE_EVENTS = "Events"
TYPE_REGISTRATIONS = "Registrations"
TITLES = {
TYPE_PERSONAL: "Møter og Favorittarrangementer",
TYPE_EVENTS: "Arrangementer",
TYPE_REGISTRATIONS: "Registreringstidspunkt",
}
DESCRIPTIONS = {
TYPE_PERSONAL: "Dine møter og favorittarrangementer på abakus.no",
TYPE_EVENTS: "Oversikt over bedriftspresentasjoner, kurs og andre arrangementer på abakus.no",
TYPE_REGISTRATIONS: "Registreringstidspunktene til arrangementene på abakus.no",
}
| 30.894737
| 98
| 0.785349
|
30c57203c80de33e20d367415e5f0e7bc2729bb4
| 2,448
|
py
|
Python
|
code/args.py
|
DongjaeJang/Deep-Knowledge-Tracing
|
aab72939a6cbdfc8b7f11bf074040b48771cbf3f
|
[
"Unlicense"
] | null | null | null |
code/args.py
|
DongjaeJang/Deep-Knowledge-Tracing
|
aab72939a6cbdfc8b7f11bf074040b48771cbf3f
|
[
"Unlicense"
] | null | null | null |
code/args.py
|
DongjaeJang/Deep-Knowledge-Tracing
|
aab72939a6cbdfc8b7f11bf074040b48771cbf3f
|
[
"Unlicense"
] | null | null | null |
import os
import argparse
def parse_args(mode='train'):
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=42, type=int, help='seed')
parser.add_argument('--device', default='cpu', type=str, help='cpu or gpu')
parser.add_argument('--data_dir', default='/opt/ml/input/data/train_dataset', type=str, help='data directory')
    parser.add_argument('--asset_dir', default='asset/', type=str, help='asset directory')
parser.add_argument('--feature_type', default='cont', type=str, help='feature combination you choose')
parser.add_argument('--file_name', default='train_data.csv', type=str, help='train file name')
parser.add_argument('--model_dir', default='models/', type=str, help='model directory')
parser.add_argument('--model_name', default='model.pt', type=str, help='model file name')
parser.add_argument('--output_dir', default='output/', type=str, help='output directory')
parser.add_argument('--test_file_name', default='test_data.csv', type=str, help='test file name')
parser.add_argument('--max_seq_len', default=40, type=int, help='max sequence length')
parser.add_argument('--num_workers', default=1, type=int, help='number of workers')
    # Model
parser.add_argument('--hidden_dim', default=64, type=int, help='hidden dimension size')
parser.add_argument('--n_layers', default=2, type=int, help='number of layers')
parser.add_argument('--n_heads', default=2, type=int, help='number of heads')
parser.add_argument('--drop_out', default=0.2, type=float, help='drop out rate')
    # Training
parser.add_argument('--n_epochs', default=30, type=int, help='number of epochs')
parser.add_argument('--batch_size', default=64, type=int, help='batch size')
parser.add_argument('--lr', default=0.0001, type=float, help='learning rate')
parser.add_argument('--clip_grad', default=10, type=int, help='clip grad')
parser.add_argument('--patience', default=5, type=int, help='for early stopping')
parser.add_argument('--log_steps', default=50, type=int, help='print log per n steps')
    ### Important ###
parser.add_argument('--model', default='bert', type=str, help='model type')
parser.add_argument('--optimizer', default='adamW', type=str, help='optimizer type')
parser.add_argument('--scheduler', default='plateau', type=str, help='scheduler type')
args = parser.parse_args()
return args
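# Usage sketch (illustrative): ``args = parse_args()`` and the fields are then
# read as attributes, e.g. ``args.model``, ``args.lr``, ``args.n_epochs``.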
| 47.076923
| 114
| 0.6875
|
955122e9aa5881db5759f81e9ceed867abfe3070
| 62,975
|
py
|
Python
|
src/sage/categories/primer.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/primer.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/primer.py
|
rekhabiswal/sage
|
e8633b09919542a65e7e990c8369fee30c7edefd
|
[
"BSL-1.0"
] | null | null | null |
r"""
Elements, parents, and categories in Sage: a (draft of) primer
.. contents::
:depth: 2
Abstract
========
The purpose of categories in Sage is to translate the mathematical
concept of categories (category of groups, of vector spaces, ...)
into a concrete software engineering design pattern for:
- organizing and promoting generic code
- fostering consistency across the Sage library (naming
conventions, doc, tests)
- embedding more mathematical knowledge into the system
This design pattern is largely inspired from Axiom and its
followers (Aldor, Fricas, MuPAD, ...). It differs from those by:
- blending in the Magma inspired concept of Parent/Element
- being built on top of (and not into) the standard Python object
oriented and class hierarchy mechanism. This did not require
changing the language, and could in principle be implemented in
any language supporting the creation of new classes dynamically.
The general philosophy is that *Building mathematical information
into the system yields more expressive, more conceptual and, at
the end, easier to maintain and faster code* (within a programming
realm; this would not necessarily apply to specialized libraries
like gmp!).
One line pitch for mathematicians
---------------------------------
Categories in Sage provide a library of interrelated bookshelves, with
each bookshelf containing algorithms, tests, documentation, or some
mathematical facts about the objects of a given category (e.g. groups).
One line pitch for programmers
------------------------------
Categories in Sage provide a large hierarchy of abstract classes for
mathematical objects. To keep it maintainable, the inheritance
information between the classes is not hardcoded but instead
reconstructed dynamically from duplication free semantic information.
Introduction: Sage as a library of objects and algorithms
=========================================================
The Sage library, with more than one million lines of code,
documentation, and tests, implements:
- Thousands of different kinds of objects (classes):
Integers, polynomials, matrices, groups, number fields, elliptic
  curves, permutations, morphisms, languages, ... and a few raccoons ...
- Tens of thousands methods and functions:
Arithmetic, integer and polynomial factorization, pattern matching
on words, ...
Some challenges
---------------
- How to organize this library?
One needs some bookshelves to group together related objects and algorithms.
- How to ensure consistency?
Similar objects should behave similarly::
sage: Permutations(5).cardinality()
120
sage: GL(2,2).cardinality()
6
sage: A=random_matrix(ZZ,6,3,x=7)
sage: L=LatticePolytope(A.rows())
sage: L.npoints() # oops! # random
37
- How to ensure robustness?
- How to reduce duplication?
Example: binary powering::
sage: m = 3
sage: m^8 == m*m*m*m*m*m*m*m == ((m^2)^2)^2
True
::
sage: m=random_matrix(QQ, 4, algorithm='echelonizable', rank=3, upper_bound=60)
sage: m^8 == m*m*m*m*m*m*m*m == ((m^2)^2)^2
True
We want to implement binary powering only once, as *generic* code
that will apply in all cases.
A bit of help from abstract algebra
===================================
The hierarchy of categories
---------------------------
What makes binary powering work in the above examples? In both cases,
we have *a set* endowed with a *multiplicative binary operation* which
is *associative* and which has a unit element. Such a set is called a
*monoid*, and binary powering (to a non-negative power) works generally
for any monoid.
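For concreteness, here is a minimal sketch of what such a generic binary
powering routine could look like (an illustration only, not the actual Sage
implementation; it assumes an associative product, integer ``//`` and ``%``
on the exponent, and a unit obtained via ``parent().one()``)::
    def generic_pow(m, n):
        result = m.parent().one()  # unit of the monoid
        while n:
            if n % 2:              # multiply in the current square when the bit is set
                result = result * m
            m = m * m              # repeated squaring
            n = n // 2
        return result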
Sage knows about monoids::
sage: Monoids()
Category of monoids
and sure enough, binary powering is defined there::
sage: m._pow_int.__module__
'sage.categories.monoids'
That's our bookshelf! And it's used in many places::
sage: GL(2,ZZ) in Monoids()
True
sage: NN in Monoids()
True
For a less trivial bookshelf we can consider euclidean rings: once we
know how to do euclidean division in some set `R`, we can compute
gcd's in `R` generically using the Euclidean algorithm.
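As an illustration (again a sketch, not the actual Sage code), such a generic
gcd could be written along the following lines, assuming only that ``%``
performs Euclidean division in `R`::
    def generic_gcd(a, b):
        while b != 0:
            a, b = b, a % b   # one step of the Euclidean algorithm
        return a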
We are in fact very lucky: abstract algebra provides us right away
with a large and robust set of bookshelves which is the result of
centuries of work of mathematicians to identify the important
concepts. This includes for example::
sage: Sets()
Category of sets
sage: Groups()
Category of groups
sage: Rings()
Category of rings
sage: Fields()
Category of fields
sage: HopfAlgebras(QQ)
Category of hopf algebras over Rational Field
Each of the above is called a *category*. It typically specifies what
are the operations on the elements, as well as the axioms satisfied by
those operations. For example the category of groups specifies that a
group is a set endowed with a binary operation (the multiplication)
which is associative and admits a unit and inverses.
Each set in Sage knows which bookshelf of generic algorithms it can
use, that is to which category it belongs::
sage: G = GL(2,ZZ)
sage: G.category()
Category of infinite groups
In fact a group is a semigroup, and Sage knows about this::
sage: Groups().is_subcategory(Semigroups())
True
sage: G in Semigroups()
True
Altogether, our group gets algorithms from a bunch of bookshelves::
sage: G.categories()
[Category of infinite groups, Category of groups, Category of monoids,
...,
Category of magmas,
Category of infinite sets, ...]
Those can be viewed graphically::
sage: g = Groups().category_graph()
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
If ``dot2tex`` is not available, you can instead use::
sage: g.show(vertex_shape=None, figsize=20)
Here is an overview of all categories in Sage::
sage: g = sage.categories.category.category_graph()
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
Wrap-up: generic algorithms in Sage are organized in a hierarchy of
bookshelves modelled upon the usual hierarchy of categories provided
by abstract algebra.
.. _category-primer-parents-elements-categories:
Elements, Parents, Categories
-----------------------------
.. RUBRIC:: Parent
A *parent* is a Python instance modelling a set of mathematical
elements together with its additional (algebraic) structure.
Examples include the ring of integers, the group `S_3`, the set of
prime numbers, the set of linear maps between two given vector
spaces, and a given finite semigroup.
These sets are often equipped with additional structure: the set
of all integers forms a ring. The main way of encoding this
information is specifying which categories a parent belongs to.
It is completely possible to have different Python instances
modelling the same set of elements. For example, one might want
to consider the ring of integers, or the poset of integers under
their standard order, or the poset of integers under divisibility,
or the semiring of integers under the operations of maximum and
addition. Each of these would be a different instance, belonging
to different categories.
For a given model, there should be a unique instance in Sage
representing that parent::
sage: IntegerRing() is IntegerRing()
True
.. RUBRIC:: Element
An *element* is a Python instance modelling a mathematical element
of a set.
Examples of element include `5` in the integer ring, `x^3 - x` in
the polynomial ring in `x` over the rationals, `4 + O(3^3)` in the
3-adics, the transposition `(1 2)` in `S_3`, and the identity
morphism in the set of linear maps from `\QQ^3` to `\QQ^3`.
Every element in Sage has a parent. The standard idiom in Sage
for creating elements is to create their parent, and then provide
enough data to define the element::
sage: R = PolynomialRing(ZZ, name='x')
sage: R([1,2,3])
3*x^2 + 2*x + 1
One can also create elements using various methods on the parent
and arithmetic of elements::
sage: x = R.gen()
sage: 1 + 2*x + 3*x^2
3*x^2 + 2*x + 1
Unlike parents, elements in Sage are not necessarily unique::
sage: ZZ(5040) is ZZ(5040)
False
Many parents model algebraic structures, and their elements
support arithmetic operations. One often further wants to do
arithmetic by combining elements from different parents: adding
together integers and rationals for example. Sage supports this
feature using coercion (see :mod:`sage.structure.coerce` for more
details).
It is possible for a parent to also have simultaneously the
structure of an element. Consider for example the monoid of all
finite groups, endowed with the Cartesian product operation.
Then, every finite group (which is a parent) is also an element of
this monoid. This is not yet implemented, and the design details
are not yet fixed but experiments are underway in this direction.
.. TODO:: Give a concrete example, typically using :class:`ElementWrapper`.
.. RUBRIC:: Category
A *category* is a Python instance modelling a mathematical category.
Examples of categories include the category of finite semigroups,
the category of all (Python) objects, the category of
`\ZZ`-algebras, and the category of Cartesian products of
`\ZZ`-algebras::
sage: FiniteSemigroups()
Category of finite semigroups
sage: Objects()
Category of objects
sage: Algebras(ZZ)
Category of algebras over Integer Ring
sage: Algebras(ZZ).CartesianProducts()
Category of Cartesian products of algebras over Integer Ring
Mind the 's' in the names of the categories above;
``GroupAlgebra`` and ``GroupAlgebras`` are distinct things.
Every parent belongs to a collection of categories. Moreover,
categories are interrelated by the *super categories*
relation. For example, the category of rings is a super category
of the category of fields, because every field is also a ring.
A category serves two roles:
- to provide a model for the mathematical concept of a category
and the associated structures: homsets, morphisms, functorial
constructions, axioms.
- to organize and promote generic code, naming conventions,
documentation, and tests across similar mathematical structures.
.. RUBRIC:: CategoryObject
Objects of a mathematical category are not necessarily parents.
Parent has a superclass that provides a means of modelling such objects.
For example, the category of schemes does not have a faithful
forgetful functor to the category of sets, so it does not make
sense to talk about schemes as parents.
.. RUBRIC:: Morphisms, Homsets
As category theorists will expect, *Morphisms* and *Homsets* will
play an ever more important role, as support for them will
improve.
----
Much of the mathematical information in Sage is encoded as relations
between elements and their parents, parents and their categories, and
categories and their super categories::
sage: 1.parent()
Integer Ring
sage: ZZ
Integer Ring
sage: ZZ.category()
Join of Category of euclidean domains
and Category of infinite enumerated sets
and Category of metric spaces
sage: ZZ.categories()
[Join of Category of euclidean domains
and Category of infinite enumerated sets
and Category of metric spaces,
Category of euclidean domains, Category of principal ideal domains,
Category of unique factorization domains, Category of gcd domains,
Category of integral domains, Category of domains,
Category of commutative rings, Category of rings, ...
Category of magmas and additive magmas, ...
Category of monoids, Category of semigroups,
Category of commutative magmas, Category of unital magmas, Category of magmas,
Category of commutative additive groups, ..., Category of additive magmas,
Category of infinite enumerated sets, Category of enumerated sets,
Category of infinite sets, Category of metric spaces,
Category of topological spaces, Category of sets,
Category of sets with partial maps,
Category of objects]
sage: g = EuclideanDomains().category_graph()
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
A bit of help from computer science
===================================
Hierarchy of classes
--------------------
How are the bookshelves implemented in practice?
Sage uses the classical design paradigm of Object Oriented Programming
(OOP). Its fundamental principle is that any object that a program is
to manipulate should be modelled by an *instance* of a *class*. The
class implements:
- a *data structure*: which describes how the object is stored,
- *methods*: which describe the operations on the object.
The instance itself contains the data for the given object, according
to the specified data structure.
Hence, all the objects mentioned above should be instances of some
classes. For example, an integer in Sage is an instance of the class
:class:`Integer` (and it knows about it!)::
sage: i = 12
sage: type(i)
<type 'sage.rings.integer.Integer'>
Applying an operation is generally done by *calling a method*::
sage: i.factor()
2^2 * 3
sage: x = var('x')
sage: p = 6*x^2 + 12*x + 6
sage: type(p)
<type 'sage.symbolic.expression.Expression'>
sage: p.factor()
6*(x + 1)^2
sage: R.<x> = PolynomialRing(QQ, sparse=True)
sage: pQ = R ( p )
sage: type(pQ)
<class 'sage.rings.polynomial.polynomial_ring.PolynomialRing_field_with_category.element_class'>
sage: pQ.factor()
(6) * (x + 1)^2
sage: pZ = ZZ['x'] ( p )
sage: type(pZ)
<type 'sage.rings.polynomial.polynomial_integer_dense_flint.Polynomial_integer_dense_flint'>
sage: pZ.factor()
2 * 3 * (x + 1)^2
Factoring integers, expressions, or polynomials are distinct tasks,
with completely different algorithms. Yet, from a user (or caller)
point of view, all those objects can be manipulated alike. This
illustrates the OOP concepts of *polymorphism*, *data abstraction*,
and *encapsulation*.
Let us be curious, and see where some methods are defined. This can be
done by introspection::
sage: i._mul_?? # not tested
For plain Python methods, one can also just ask in which module they
are implemented::
sage: i._pow_.__module__ # not tested (Trac #24275)
'sage.categories.semigroups'
sage: pQ._mul_.__module__
'sage.rings.polynomial.polynomial_element_generic'
sage: pQ._pow_.__module__ # not tested (Trac #24275)
'sage.categories.semigroups'
We see that integers and polynomials have each their own
multiplication method: the multiplication algorithms are indeed
unrelated and deeply tied to their respective datastructures. On the
other hand, as we have seen above, they share the same powering method
because the set `\ZZ` of integers, and the set `\QQ[x]` of
polynomials are both semigroups. Namely, the class for integers and
the class for polynomials both derive from an *abstract class* for
semigroup elements, which factors out the *generic* methods like
``_pow_``. This illustrates the use of *hierarchy of classes* to share
common code between classes having common behaviour.
OOP design is all about isolating the objects that one wants to model
together with their operations, and designing an appropriate hierarchy
of classes for organizing the code. As we have seen above, the design
of the class hierarchy is easy since it can be modelled upon the
hierarchy of categories (bookshelves). Here is for example a piece of
the hierarchy of classes for an element of a group of permutations::
sage: P = Permutations(4)
sage: m = P.an_element()
sage: for cls in m.__class__.mro(): print(cls)
<class 'sage.combinat.permutation.StandardPermutations_n_with_category.element_class'>
<class 'sage.combinat.permutation.StandardPermutations_n.Element'>
<class 'sage.combinat.permutation.Permutation'>
...
<class 'sage.categories.groups.Groups.element_class'>
<class 'sage.categories.monoids.Monoids.element_class'>
...
<class 'sage.categories.semigroups.Semigroups.element_class'>
...
At the top, we see concrete classes that describe the data structure
for permutations and provide the operations that are tied to this data
structure. Then follow abstract classes that are attached to the
hierarchy of categories and provide generic algorithms.
The full hierarchy is best viewed graphically::
sage: g = class_graph(m.__class__)
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
Parallel hierarchy of classes for parents
-----------------------------------------
Let us recall that we do not just want to compute with elements of
mathematical sets, but with the sets themselves::
sage: ZZ.one()
1
sage: R = QQ['x,y']
sage: R.krull_dimension()
2
sage: A = R.quotient( R.ideal(x^2 - 2) )
sage: A.krull_dimension() # todo: not implemented
Here are some typical operations that one may want to carry on various
kinds of sets:
- The set of permutations of 5, the set of rational points of an
elliptic curve: counting, listing, random generation
- A language (set of words): rationality testing, counting elements,
generating series
- A finite semigroup: left/right ideals, center, representation theory
- A vector space, an algebra: Cartesian product, tensor product, quotient
Hence, following the OOP fundamental principle, parents should also be
modelled by instances of some (hierarchy of) classes. For example, our
group `G` is an instance of the following class::
sage: G = GL(2,ZZ)
sage: type(G)
<class 'sage.groups.matrix_gps.linear.LinearMatrixGroup_gap_with_category'>
Here is a piece of the hierarchy of classes above it::
sage: for cls in G.__class__.mro(): print(cls)
<class 'sage.groups.matrix_gps.linear.LinearMatrixGroup_gap_with_category'>
...
<class 'sage.categories.groups.Groups.parent_class'>
<class 'sage.categories.monoids.Monoids.parent_class'>
<class 'sage.categories.semigroups.Semigroups.parent_class'>
...
Note that the hierarchy of abstract classes is again attached to
categories and parallel to the one we saw for the elements. This is
best viewed graphically::
sage: g = class_graph(m.__class__)
sage: g.relabel(lambda x: x.replace("_","\_"))
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
.. NOTE::
    This is an improvement over systems like Axiom or MuPAD, where a parent
is modelled by the class of its elements; this oversimplification
leads to confusion between methods on parents and elements, and
makes parents special; in particular it prevents potentially
interesting constructions like "groups of groups".
Sage categories
===============
Why this business of categories? And to start with, why don't we just
have a good old hierarchy of classes ``Group``, ``Semigroup``,
``Magma``, ... ?
Dynamic hierarchy of classes
----------------------------
As we have just seen, when we manipulate groups, we actually
manipulate several kinds of objects:
- groups
- group elements
- morphisms between groups
- and even the category of groups itself!
Thus, on the group bookshelf, we want to put generic code for each of
the above. We therefore need three, parallel hierarchies of abstract
classes:
- Group, Monoid, Semigroup, Magma, ...
- GroupElement, MonoidElement, SemigroupElement, MagmaElement, ...
- GroupMorphism, MonoidMorphism, SemigroupMorphism, MagmaMorphism, ...
(and in fact many more as we will see).
We could implement the above hierarchies as usual::
class Group(Monoid):
# generic methods that apply to all groups
class GroupElement(MonoidElement):
# generic methods that apply to all group elements
class GroupMorphism(MonoidMorphism):
# generic methods that apply to all group morphisms
And indeed that's how it was done in Sage before 2009, and there are
still many traces of this. The drawback of this approach is
duplication: the fact that a group is a monoid is repeated three times
above!
Instead, Sage now uses the following syntax, where the :class:`Groups`
bookshelf is structured into units with *nested classes*::
class Groups(Category):
def super_categories(self):
return [Monoids(), ...]
class ParentMethods:
# generic methods that apply to all groups
class ElementMethods:
# generic methods that apply to all group elements
class MorphismMethods:
# generic methods that apply to all group morphisms (not yet implemented)
class SubcategoryMethods:
# generic methods that apply to all subcategories of Groups()
With this syntax, the information that a group is a monoid is
specified only once, in the :meth:`Category.super_categories`
method. And indeed, when the category of inverse unital magmas was
introduced, there was a *single point of truth* to update in order to
reflect the fact that a group is an inverse unital magma::
sage: Groups().super_categories()
[Category of monoids, Category of inverse unital magmas]
The price to pay (there is no free lunch) is that some magic is
required to construct the actual hierarchy of classes for parents,
elements, and morphisms. Namely, ``Groups.ElementMethods`` should be
seen as just a bag of methods, and the actual class
``Groups().element_class`` is constructed from it by adding the
appropriate super classes according to
``Groups().super_categories()``::
sage: Groups().element_class
<class 'sage.categories.groups.Groups.element_class'>
sage: Groups().element_class.__bases__
(<class 'sage.categories.monoids.Monoids.element_class'>,
<class 'sage.categories.magmas.Magmas.Unital.Inverse.element_class'>)
We now see that the hierarchy of classes for parents and elements is
parallel to the hierarchy of categories::
sage: Groups().all_super_categories()
[Category of groups,
Category of monoids,
Category of semigroups,
...
Category of magmas,
Category of sets,
...]
sage: for cls in Groups().element_class.mro(): print(cls)
<class 'sage.categories.groups.Groups.element_class'>
<class 'sage.categories.monoids.Monoids.element_class'>
<class 'sage.categories.semigroups.Semigroups.element_class'>
...
<class 'sage.categories.magmas.Magmas.element_class'>
...
sage: for cls in Groups().parent_class.mro(): print(cls)
<class 'sage.categories.groups.Groups.parent_class'>
<class 'sage.categories.monoids.Monoids.parent_class'>
<class 'sage.categories.semigroups.Semigroups.parent_class'>
...
<class 'sage.categories.magmas.Magmas.parent_class'>
...
Another advantage of building the hierarchy of classes dynamically is
that, for parametrized categories, the hierarchy may depend on the
parameters. For example an algebra over `\QQ` is a `\QQ`-vector space,
but an algebra over `\ZZ` is not (it is just a `\ZZ`-module)!
.. NOTE::
At this point this whole infrastructure may feel like
overdesigning, right? We felt like this too! But we will see later
that, once one gets used to it, this approach scales very
naturally.
From a computer science point of view, this infrastructure
implements, on top of standard multiple inheritance, a dynamic
composition mechanism of mixin classes (:wikipedia:`Mixin`),
governed by mathematical properties.
For implementation details on how the hierarchy of classes for
parents and elements is constructed, see :class:`Category`.
.. _category-primer-subcategory:
On the category hierarchy: subcategories and super categories
-------------------------------------------------------------
We have seen above that, for example, the category of sets is a super
category of the category of groups. This models the fact that a group
can be unambiguously considered as a set by forgetting its group
operation. In object-oriented parlance, we want the relation "a group
*is a* set", so that groups can directly inherit code implemented on
sets.
Formally, a category ``Cs()`` is a *super category* of a category
``Ds()`` if Sage considers any object of ``Ds()`` to be an object of
``Cs()``, up to an implicit application of a canonical functor from
``Ds()`` to ``Cs()``. This functor is normally an inclusion of
categories or a forgetful functor. Reciprocally, ``Ds()`` is said to
be a *subcategory* of ``Cs()``.
.. WARNING::
This terminology deviates from the usual mathematical definition
of *subcategory* and is subject to change. Indeed, the forgetful
functor from the category of groups to the category of sets is not
an inclusion of categories, as it is not injective: a given set
may admit more than one group structure. See :trac:`16183` for
more details. The name *supercategory* is also used with a
different meaning in certain areas of mathematics.
Categories are instances and have operations
--------------------------------------------
Note that categories themselves are naturally modelled by instances
because they can have operations of their own. An important one is::
sage: Groups().example()
General Linear Group of degree 4 over Rational Field
which gives an example of object of the category. Besides illustrating
the category, the example provides a minimal template for implementing
a new object in the category::
sage: S = Semigroups().example(); S
An example of a semigroup: the left zero semigroup
Its source code can be obtained by introspection::
sage: S?? # not tested
This example is also typically used for testing generic methods. See
:meth:`Category.example` for more.
Other operations on categories include querying the super categories
or the axioms satisfied by the operations of a category::
sage: Groups().super_categories()
[Category of monoids, Category of inverse unital magmas]
sage: Groups().axioms()
frozenset({'Associative', 'Inverse', 'Unital'})
or constructing the intersection of two categories, or the smallest
category containing them::
sage: Groups() & FiniteSets()
Category of finite groups
sage: Algebras(QQ) | Groups()
Category of monoids
Specifications and generic documentation
----------------------------------------
Categories do not only contain code but also the specifications of the
operations. In particular a list of mandatory and optional methods to
be implemented can be found by introspection with::
sage: Groups().required_methods()
{'element': {'optional': ['_mul_'], 'required': []},
'parent': {'optional': [], 'required': ['__contains__']}}
Documentation about those methods can be obtained with::
sage: G = Groups()
sage: G.element_class._mul_? # not tested
sage: G.parent_class.one? # not tested
See also the :func:`abstract_method` decorator.
.. WARNING::
Well, more precisely, that's how things should be, but there is
still some work to do in this direction. For example, the inverse
operation is not specified above. Also, we are still missing a
good programmatic syntax to specify the input and output types of
the methods. Finally, in many cases the implementer must provide
at least one of two methods, each having a default implementation
using the other one (e.g. listing or iterating for a finite
enumerated set); there is currently no good programmatic way to
specify this.
Generic tests
-------------
Another feature that parents and elements receive from categories is
generic tests; their purpose is to check (at least to some extent)
that the parent satisfies the required mathematical properties (is my
semigroup indeed associative?) and is implemented according to the
specifications (does the method ``an_element`` indeed return an
element of the parent?)::
sage: S = FiniteSemigroups().example(alphabet=('a', 'b'))
sage: TestSuite(S).run(verbose = True)
running ._test_an_element() . . . pass
running ._test_associativity() . . . pass
running ._test_cardinality() . . . pass
running ._test_category() . . . pass
running ._test_elements() . . .
Running the test suite of self.an_element()
running ._test_category() . . . pass
running ._test_eq() . . . pass
running ._test_new() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_pickling() . . . pass
pass
running ._test_elements_eq_reflexive() . . . pass
running ._test_elements_eq_symmetric() . . . pass
running ._test_elements_eq_transitive() . . . pass
running ._test_elements_neq() . . . pass
running ._test_enumerated_set_contains() . . . pass
running ._test_enumerated_set_iter_cardinality() . . . pass
running ._test_enumerated_set_iter_list() . . . pass
running ._test_eq() . . . pass
running ._test_new() . . . pass
running ._test_not_implemented_methods() . . . pass
running ._test_pickling() . . . pass
running ._test_some_elements() . . . pass
Tests can be run individually::
sage: S._test_associativity()
Here is how to access the code of this test::
sage: S._test_associativity?? # not tested
Here is how to run the test on all elements::
sage: L = S.list()
sage: S._test_associativity(elements=L)
See :class:`TestSuite` for more information.
Let us see what happens when a test fails. Here we redefine the
product of `S` to something definitely not associative::
sage: S.product = lambda x, y: S("("+x.value +y.value+")")
And rerun the test::
sage: S._test_associativity(elements=L)
Traceback (most recent call last):
...
File ".../sage/categories/semigroups.py", line ..., in _test_associativity
tester.assertTrue((x * y) * z == x * (y * z))
...
AssertionError: False is not true
We can recover instantly the actual values of ``x``, ``y``, ``z``, that is,
a counterexample to the associativity of our broken semigroup, using post
mortem introspection with the Python debugger ``pdb`` (this does not
work yet in the notebook)::
sage: import pdb
sage: pdb.pm() # not tested
> /opt/sage-5.11.rc1/local/lib/python/unittest/case.py(424)assertTrue()
-> raise self.failureException(msg)
(Pdb) u
> /opt/sage-5.11.rc1/local/lib/python2.7/site-packages/sage/categories/semigroups.py(145)_test_associativity()
-> tester.assertTrue((x * y) * z == x * (y * z))
(Pdb) p x, y, z
('a', 'a', 'a')
(Pdb) p (x * y) * z
'((aa)a)'
(Pdb) p x * (y * z)
'(a(aa))'
Wrap-up
-------
- Categories provide a natural hierarchy of bookshelves to organize
not only code, but also specifications and testing tools.
- Everything about, say, algebras with a distinguished basis is
gathered in :class:`AlgebrasWithBasis` or its super categories.
This includes properties and algorithms for elements, parents,
morphisms, but also, as we will see, for constructions like
Cartesian products or quotients.
- The mathematical relations between elements, parents, and categories
translate dynamically into a traditional hierarchy of classes.
- This design enforces robustness and consistency, which is
particularly welcome given that Python is an interpreted language
without static type checking.
Case study
==========
In this section, we study an existing parent in detail; a good followup is to
go through the :mod:`sage.categories.tutorial` or the thematic tutorial on
coercion and categories ("How to implement new algebraic structures in Sage")
to learn how to implement a new one!
We consider the example of finite semigroup provided by the category::
sage: S = FiniteSemigroups().example(); S
An example of a finite semigroup: the left regular band generated by ('a', 'b', 'c', 'd')
sage: S? # not tested
Where do all the operations on ``S`` and its elements come from?
::
sage: x = S('a')
``_repr_`` is a technical method which comes with the data structure
(:class:`ElementWrapper`); since it's implemented in Cython, we need
to use Sage's introspection tools to recover where it's implemented::
sage: x._repr_.__module__
sage: sage.misc.sageinspect.sage_getfile(x._repr_)
'.../sage/structure/element_wrapper.pyx'
``_pow_int`` is a generic method for all finite semigroups::
sage: x._pow_int.__module__
'sage.categories.semigroups'
``__mul__`` is a generic method provided by the :class:`Magmas`
category (a *magma* is a set with an inner law `*`, not necessarily
associative). If the two arguments are in the same parent, it will
call the method ``_mul_``, and otherwise let the :mod:`coercion model
<sage.structure.coerce>` try to discover how to do the
multiplication::
sage: x.__mul__?? # not tested
Since it is a speed critical method, it is implemented in Cython
in a separate file::
sage: x._mul_.__module__
'sage.categories.coercion_methods'
``_mul_`` is a default implementation, also provided by the
:class:`Magmas` category, that delegates the work to the method
``product`` of the parent (following the advice: if you do not know
what to do, ask your parent); it's also a speed critical method::
sage: x._mul_?? # not tested
sage: x._mul_.__module__
'sage.categories.coercion_methods'
sage: from six import get_method_function as gmf
sage: gmf(x._mul_) is gmf(Magmas.ElementMethods._mul_parent)
True
``product`` is a mathematical method implemented by the parent::
sage: S.product.__module__
'sage.categories.examples.finite_semigroups'
``cayley_graph`` is a generic method on the parent, provided by the
:class:`FiniteSemigroups` category::
sage: S.cayley_graph.__module__
'sage.categories.semigroups'
``multiplication_table`` is a generic method on the parent, provided
by the :class:`Magmas` category (it does not require associativity)::
sage: S.multiplication_table.__module__
'sage.categories.magmas'
Consider now the implementation of the semigroup::
sage: S?? # not tested
This implementation specifies a data structure for the parents and the
elements, and makes a promise: the implemented parent is a finite
semigroup. Then it fulfills the promise by implementing the basic
operation ``product``. It also implements the optional method
``semigroup_generators``. In exchange, `S` and its elements receive
generic implementations of all the other operations. `S` may override
any of those by more efficient ones. It may typically implement the
element method ``is_idempotent`` to always return ``True``.
A (not yet complete) list of mandatory and optional methods to be
implemented can be found by introspection with::
sage: FiniteSemigroups().required_methods()
{'element': {'optional': ['_mul_'], 'required': []},
'parent': {'optional': ['semigroup_generators'],
'required': ['__contains__']}}
``product`` does not appear in the list because a default implementation
is provided in terms of the method ``_mul_`` on elements. Of course, at
least one of them should be implemented. On the other hand, a default
implementation for ``__contains__`` is provided by :class:`Parent`.
Documentation about those methods can be obtained with::
sage: C = FiniteSemigroups().element_class
sage: C._mul_? # not tested
See also the :func:`~sage.misc.abstract_method.abstract_method` decorator.
Here is the code for the finite semigroups category::
sage: FiniteSemigroups?? # not tested
Specifying the category of a parent
===================================
Some parent constructors (not enough!) allow one to specify the desired
category for the parent. This can typically be used to specify
additional properties of the parent that we know to hold a priori. For
example, permutation groups are by default in the category of finite
permutation groups (no surprise)::
sage: P = PermutationGroup([[(1,2,3)]]); P
Permutation Group with generators [(1,2,3)]
sage: P.category()
Category of finite enumerated permutation groups
In this case, the group is commutative, so we can specify this::
sage: P = PermutationGroup([[(1,2,3)]], category=PermutationGroups().Finite().Commutative()); P
Permutation Group with generators [(1,2,3)]
sage: P.category()
Category of finite enumerated commutative permutation groups
This feature can even be used, typically in experimental code, to add
more structure to existing parents, and in particular to add methods
for the parents or the elements, without touching the code base::
sage: class Foos(Category):
....: def super_categories(self):
....: return [PermutationGroups().Finite().Commutative()]
....: class ParentMethods:
....: def foo(self): print("foo")
....: class ElementMethods:
....: def bar(self): print("bar")
sage: P = PermutationGroup([[(1,2,3)]], category=Foos())
sage: P.foo()
foo
sage: p = P.an_element()
sage: p.bar()
bar
In the long run, it would be conceivable to use this idiom to implement
forgetful functors; for example the above group could be constructed
as a plain set with::
sage: P = PermutationGroup([[(1,2,3)]], category=Sets()) # todo: not implemented
At this stage though, this is still to be explored for robustness
and practicality. For now, most parents that accept a category argument
only accept a subcategory of the default one.
Scaling further: functorial constructions, axioms, ...
======================================================
In this section, we explore more advanced features of categories.
Along the way, we illustrate that a large hierarchy of categories is
desirable to model complicated mathematics, and that scaling to
support such a large hierarchy is the driving motivation for the
design of the category infrastructure.
.. _category-primer-functorial-constructions:
Functorial constructions
------------------------
Sage has support for a certain number of so-called *covariant
functorial constructions* which can be used to construct new parents
from existing ones while carrying over as much as possible of their
algebraic structure. This includes:
- Cartesian products:
See :const:`~sage.categories.cartesian_product.cartesian_product`.
- Tensor products:
See :const:`~sage.categories.tensor.tensor`.
- Subquotients / quotients / subobjects / isomorphic objects:
See:
- :meth:`Sets().Subquotients <Sets.SubcategoryMethods.Subquotients>`,
- :meth:`Sets().Quotients <Sets.SubcategoryMethods.Quotients>`,
- :meth:`Sets().Subobjects <Sets.SubcategoryMethods.Subobjects>`,
- :meth:`Sets().IsomorphicObjects <Sets.SubcategoryMethods.IsomorphicObjects>`
- Dual objects:
See :meth:`Modules().DualObjects <Modules.SubcategoryMethods.DualObjects>`.
- Algebras, as in group algebras, monoid algebras, ...:
See: :meth:`Sets.ParentMethods.algebras`.
For example, let `A` and `B` be two parents, and let us construct the
Cartesian product `A \times B \times B`::
sage: A = AlgebrasWithBasis(QQ).example(); A.rename("A")
sage: B = HopfAlgebrasWithBasis(QQ).example(); B.rename("B")
sage: C = cartesian_product([A, B, B]); C
A (+) B (+) B
In which category should this new parent be? Since `A` and `B` are
vector spaces, the result is, as a vector space, the direct sum
`A \oplus B \oplus B`, hence the notation. Also, since both `A` and `B`
are monoids, `A \times B \times B` is naturally endowed with a monoid
structure for pointwise multiplication::
sage: C in Monoids()
True
the unit being the Cartesian product of the units of the operands::
sage: C.one()
B[(0, word: )] + B[(1, ())] + B[(2, ())]
sage: cartesian_product([A.one(), B.one(), B.one()])
B[(0, word: )] + B[(1, ())] + B[(2, ())]
The pointwise product can be implemented generically for all magmas
(i.e. sets endowed with a multiplicative operation) that are
constructed as Cartesian products. It's thus implemented in the
:class:`Magmas` category::
sage: C.product.__module__
'sage.categories.magmas'
More specifically, continuing to use nested classes to structure the
code, the product method is put in the nested class
:class:`Magmas.CartesianProducts.ParentMethods`::
    class Magmas(Category):
        class ParentMethods:
            # methods for magmas
        class ElementMethods:
            # methods for elements of magmas
        class CartesianProducts(CartesianProductsCategory):
            class ParentMethods:
                # methods for magmas that are constructed as Cartesian products
                def product(self, x, y):
                    # ...
            class ElementMethods:
                # ...
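To give a rough idea of what hides behind the ``# ...`` above, such a
pointwise product could be sketched as follows (a simplified
illustration; the actual Sage code may differ)::

    def product(self, x, y):
        # multiply componentwise, delegating to each factor of the
        # Cartesian product, and reassemble the result
        return self._cartesian_product_of_elements(
            [M.product(xi, yi)
             for M, xi, yi in zip(self.cartesian_factors(),
                                  x.cartesian_factors(),
                                  y.cartesian_factors())])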
.. NOTE::
The support for nested classes in Python is relatively
recent. Their intensive use for the category infrastructure did
reveal some glitches in their implementation, in particular around
class naming and introspection. Sage currently works around the
more annoying ones but some remain visible. See
e.g. :mod:`sage.misc.nested_class_test`.
Let us now look at the categories of ``C``::
sage: C.categories()
[Category of finite dimensional Cartesian products of algebras with basis over Rational Field, ...
Category of Cartesian products of algebras over Rational Field, ...
Category of Cartesian products of semigroups, Category of semigroups, ...
Category of Cartesian products of magmas, ..., Category of magmas, ...
Category of Cartesian products of additive magmas, ..., Category of additive magmas,
Category of Cartesian products of sets, Category of sets, ...]
This reveals the parallel hierarchy of categories for Cartesian
products of semigroups, magmas, ... We are thus glad that Sage uses
its knowledge that a monoid is a semigroup to automatically deduce
that a Cartesian product of monoids is a Cartesian product of
semigroups, and build the hierarchy of classes for parents and
elements accordingly.
In general, the Cartesian product of `A` and `B` can potentially be an
algebra, a coalgebra, a differential module, and be finite
dimensional, or graded, or .... This can only be decided at runtime,
by introspection into the properties of `A` and `B`; furthermore, the
number of possible combinations (e.g. finite dimensional differential
algebra) grows exponentially with the number of properties.
.. _category-primer-axioms:
Axioms
------
First examples
^^^^^^^^^^^^^^
We have seen that Sage is aware of the axioms satisfied by, for
example, groups::
sage: Groups().axioms()
frozenset({'Associative', 'Inverse', 'Unital'})
In fact, the category of groups can be *defined* by stating that a
group is a magma, that is, a set endowed with an internal binary
multiplication, satisfying the above axioms. Accordingly, we can
construct the category of groups from the category of magmas::
sage: Magmas().Associative().Unital().Inverse()
Category of groups
In general, we can construct new categories in Sage by specifying the
axioms that are satisfied by the operations of the super
categories. For example, starting from the category of magmas, we can
construct all the following categories just by specifying the axioms
satisfied by the multiplication::
sage: Magmas()
Category of magmas
sage: Magmas().Unital()
Category of unital magmas
::
sage: Magmas().Commutative().Unital()
Category of commutative unital magmas
sage: Magmas().Unital().Commutative()
Category of commutative unital magmas
::
sage: Magmas().Associative()
Category of semigroups
::
sage: Magmas().Associative().Unital()
Category of monoids
::
sage: Magmas().Associative().Unital().Commutative()
Category of commutative monoids
::
sage: Magmas().Associative().Unital().Inverse()
Category of groups
Axioms and categories with axioms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here, ``Associative``, ``Unital``, ``Commutative`` are axioms. In
general, any category ``Cs`` in Sage can declare a new axiom
``A``. Then, the *category with axiom* ``Cs.A()`` models the
subcategory of the objects of ``Cs`` satisfying the axiom
``A``. Similarly, for any subcategory ``Ds`` of ``Cs``, ``Ds.A()``
models the subcategory of the objects of ``Ds`` satisfying the axiom
``A``. In most cases, it's a *full subcategory* (see
:wikipedia:`Subcategory`).
For example, the category of sets defines the ``Finite`` axiom, and
this axiom is available in the subcategory of groups::
sage: Sets().Finite()
Category of finite sets
sage: Groups().Finite()
Category of finite groups
The meaning of each axiom is described in the documentation of the
corresponding method, which can be obtained as usual by
introspection::
sage: C = Groups()
sage: C.Finite? # not tested
The purpose of categories with axioms is no different from that of
other categories: to provide bookshelves of code, documentation,
mathematical knowledge, tests, for their objects. The extra feature is
that, when intersecting categories, axioms are automatically combined
together::
sage: C = Magmas().Associative() & Magmas().Unital().Inverse() & Sets().Finite(); C
Category of finite groups
sage: sorted(C.axioms())
['Associative', 'Finite', 'Inverse', 'Unital']
For a more advanced example, Sage knows that a ring is a set `C`
endowed with a multiplication which distributes over addition, such
that `(C, +)` is a commutative additive group and `(C, *)` is a monoid::
sage: C = (CommutativeAdditiveGroups() & Monoids()).Distributive(); C
Category of rings
sage: sorted(C.axioms())
['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
'AdditiveUnital', 'Associative', 'Distributive', 'Unital']
The infrastructure allows for specifying further deduction rules, in
order to encode mathematical facts like Wedderburn's theorem::
sage: DivisionRings() & Sets().Finite()
Category of finite enumerated fields
.. NOTE::
When an axiom specifies the properties of some operations in Sage,
the notations for those operations are tied to this axiom. For
example, as we have seen above, we need two distinct axioms for
associativity: the axiom "AdditiveAssociative" is about the
properties of the addition `+`, whereas the axiom "Associative" is
about the properties of the multiplication `*`.
We are touching here an inherent limitation of the current
infrastructure. There is indeed no support for providing generic
code that is independent of the notations. In particular, the
category hierarchy about additive structures (additive monoids,
additive groups, ...) is completely duplicated by that for
multiplicative structures (monoids, groups, ...).
As far as we know, none of the existing computer algebra systems
has a good solution for this problem. The difficulty is that this
is not only about a single notation but a bunch of operators and
methods: ``+, -, zero, summation, sum, ...`` in one case, ``*, /,
one, product, prod, factor, ...`` in the other. Sharing something
between the two hierarchies of categories would only be useful if
one could write generic code that applies in both cases; for that
one needs to somehow automatically substitute the right operations
in the right spots in the code. That's kind of what we are doing
manually between
e.g. :meth:`AdditiveMagmas.ParentMethods.addition_table` and
:meth:`Magmas.ParentMethods.multiplication_table`, but doing this
systematically is a different beast from what we have been doing
so far with just usual inheritance.
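For concreteness, the duplication is directly visible in the user
interface; compare the two parallel methods (outputs omitted here)::

    sage: Z = Zmod(3)
    sage: Z.addition_table()        # not tested
    sage: Z.multiplication_table()  # not tested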
.. _category-primer-axioms-single-entry-point:
Single entry point and name space usage
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A nice feature of the notation ``Cs.A()`` is that, from a single entry
point (say the category :class:`Magmas` as above), one can explore a
whole range of related categories, typically with the help of
introspection to discover which axioms are available, and without
having to import new Python modules. This feature will be used in
:trac:`15741` to unclutter the global name space from, for example,
the many variants of the category of algebras like::
sage: FiniteDimensionalAlgebrasWithBasis(QQ)
Category of finite dimensional algebras with basis over Rational Field
There will of course be a deprecation step, but it is recommended to
switch right away to the more flexible notation::
sage: Algebras(QQ).WithBasis().FiniteDimensional()
Category of finite dimensional algebras with basis over Rational Field
.. TOPIC:: Design discussion
How far should this be pushed? :class:`Fields` should definitely
stay, but should :class:`FiniteGroups` or :class:`DivisionRings`
be removed from the global namespace? Do we want to further
completely deprecate the notation ``FiniteGroups()`` in favor of
``Groups().Finite()``?
.. _category-primer-axioms-explosion:
On the potential combinatorial explosion of categories with axioms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Even for a very simple category like ``Magmas``, there are about `2^5`
potential combinations of the axioms! Think about what this becomes
for a category with two operations `+` and `*`::
sage: C = (Magmas() & AdditiveMagmas()).Distributive(); C
Category of distributive magmas and additive magmas
sage: C.Associative().AdditiveAssociative().AdditiveCommutative().AdditiveUnital().AdditiveInverse()
Category of rngs
sage: C.Associative().AdditiveAssociative().AdditiveCommutative().AdditiveUnital().Unital()
Category of semirings
sage: C.Associative().AdditiveAssociative().AdditiveCommutative().AdditiveUnital().AdditiveInverse().Unital()
Category of rings
sage: Rings().Division()
Category of division rings
sage: Rings().Division().Commutative()
Category of fields
sage: Rings().Division().Finite()
Category of finite enumerated fields
or for more advanced categories::
sage: g = HopfAlgebras(QQ).WithBasis().Graded().Connected().category_graph()
sage: g.set_latex_options(format="dot2tex")
sage: view(g) # not tested
Difference between axioms and regressive covariant functorial constructions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Our running examples here will be the axiom ``FiniteDimensional`` and
the regressive covariant functorial construction ``Graded``. Let
``Cs`` be some subcategory of ``Modules``, say the category of modules
itself::
sage: Cs = Modules(QQ)
Then, ``Cs.FiniteDimensional()`` (respectively ``Cs.Graded()``) is the
subcategory of the objects ``O`` of ``Cs`` which are finite
dimensional (respectively graded).
Let also ``Ds`` be a subcategory of ``Cs``, say::
sage: Ds = Algebras(QQ)
A finite dimensional algebra is also a finite dimensional module::
sage: Algebras(QQ).FiniteDimensional().is_subcategory( Modules(QQ).FiniteDimensional() )
True
Similarly a graded algebra is also a graded module::
sage: Algebras(QQ).Graded().is_subcategory( Modules(QQ).Graded() )
True
This is the *covariance* property: for ``A`` an axiom or a covariant
functorial construction, if ``Ds`` is a subcategory of ``Cs``, then
``Ds.A()`` is a subcategory of ``Cs.A()``.
What happens if, conversely, we consider an object of ``Cs.A()`` which
is also in ``Ds``? A finite dimensional module which is also an
algebra is a finite dimensional algebra::
sage: Modules(QQ).FiniteDimensional() & Algebras(QQ)
Category of finite dimensional algebras over Rational Field
On the other hand, a graded module `O` which is also an algebra is not
necessarily a graded algebra! Indeed, the grading on `O` may not be
compatible with the product on `O`::
sage: Modules(QQ).Graded() & Algebras(QQ)
Join of Category of algebras over Rational Field and Category of graded modules over Rational Field
The relevant difference between ``FiniteDimensional`` and ``Graded``
is that ``FiniteDimensional`` is a statement about the properties of
``O`` seen as a module (and thus does not depend on the given
category), whereas ``Graded`` is a statement about the properties of
``O`` and all its operations in the given category.
In general, if a category satisfies a given axiom, any subcategory
also satisfies that axiom. Another formulation is that, for an axiom
``A`` defined in a super category ``Cs`` of ``Ds``, ``Ds.A()`` is the
intersection of the categories ``Ds`` and ``Cs.A()``::
sage: As = Algebras(QQ).FiniteDimensional(); As
Category of finite dimensional algebras over Rational Field
    sage: Bs = Algebras(QQ) & Modules(QQ).FiniteDimensional(); Bs
Category of finite dimensional algebras over Rational Field
sage: As is Bs
True
An immediate consequence is that, as we have already noticed, axioms
commute::
sage: As = Algebras(QQ).FiniteDimensional().WithBasis(); As
Category of finite dimensional algebras with basis over Rational Field
sage: Bs = Algebras(QQ).WithBasis().FiniteDimensional(); Bs
Category of finite dimensional algebras with basis over Rational Field
sage: As is Bs
True
On the other hand, axioms do not necessarily commute with functorial
constructions, even if the current printout may misleadingly suggest so::
sage: As = Algebras(QQ).Graded().WithBasis(); As
Category of graded algebras with basis over Rational Field
sage: Bs = Algebras(QQ).WithBasis().Graded(); Bs
Category of graded algebras with basis over Rational Field
sage: As is Bs
False
This is because ``Bs`` is the category of algebras endowed with basis,
which are further graded; in particular the basis must respect the
grading (i.e. be made of homogeneous elements). On the other hand,
``As`` is the category of graded algebras, which are further endowed
with some basis; that basis need not respect the grading. In fact
``As`` is really a join category::
sage: type(As)
<class 'sage.categories.category.JoinCategory_with_category'>
sage: As._repr_(as_join=True)
'Join of Category of algebras with basis over Rational Field and Category of graded algebras over Rational Field'
.. TODO::
    Improve the printing of functorial constructions and joins to
    flag this potentially dangerous ambiguity.
Further reading on axioms
^^^^^^^^^^^^^^^^^^^^^^^^^
We refer to :mod:`sage.categories.category_with_axiom` for how to
implement axioms.
Wrap-up
-------
As we have seen, there is a combinatorial explosion of possible
classes. Constructing the full class hierarchy by hand would not scale
unless one restricted it to a very rigid subset. Even if it were
possible to construct the full hierarchy automatically, this would not
scale with respect to system resources.
When designing software systems with large hierarchies of abstract
classes for business objects, the difficulty is usually to identify a
proper set of key concepts. Here we are lucky, as the key concepts
have long been identified and are relatively few:
- Operations (`+`, `*`, ...)
- Axioms on those operations (associativity, ...)
- Constructions (Cartesian products, ...)
Better, those concepts are sufficiently well known that users can
reasonably be expected to be familiar with the ones relevant
to their own needs.
Instead, the difficulty is concentrated in the huge number of possible
combinations, an unpredictably large subset of which is potentially
of interest; at the same time, only a small -- but moving -- subset
has code naturally attached to it.
This has led to the current design, where one focuses on writing the
relatively few classes for which there is actual code or mathematical
information, and lets Sage *compose dynamically and lazily* those
building blocks to construct the minimal hierarchy of classes needed
for the computation at hand. This allows the infrastructure to
scale smoothly as bookshelves are added, extended, or reorganized.
Writing a new category
======================
Each category `C` **must** be provided with a method
``C.super_categories()`` and *can* be provided with a method
``C._subcategory_hook_(D)``. Also, it may be necessary to insert `C` into
the output of the ``super_categories()`` method of some other
category. This determines the position of `C` in the category graph.
A category *may* provide methods that can be used by all its objects,
respectively by all elements of its objects.
Each category *should* come with a good example, in
:mod:`sage.categories.examples`.
Inserting the new category into the category graph
--------------------------------------------------
``C.super_categories()`` *must* return a list of categories, namely
the *immediate* super categories of `C`. Of course, if you know that
your new category `C` is an immediate super category of some existing
category `D`, then you should also update the method
``D.super_categories`` to include `C`.
The immediate super categories of `C` *should not* be :class:`join
categories <.category.JoinCategory>`. Furthermore, one should always have::
Cs().is_subcategory( Category.join(Cs().super_categories()) )
Cs()._cmp_key > other._cmp_key for other in Cs().super_categories()
This is checked by :meth:`~sage.categories.category.Category._test_category`.
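Concretely, this amounts to something like the following sketch (the
category name and the chosen super categories are of course made up)::

    class Cs(Category):
        def super_categories(self):
            # the *immediate* super categories; in particular, no join categories
            return [Monoids(), Sets().Finite()]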
In several cases, the category `C` is directly provided with a generic
implementation of ``super_categories``; a typical example is when `C`
implements an axiom or a functorial construction; in such a case, `C`
may implement ``C.extra_super_categories()`` to complement the super
categories discovered by the generic implementation. This method need
not return immediate super categories; instead, it is usually best to
specify the largest super category providing the desired mathematical
information. For example, the category
:class:`Magmas.Commutative.Algebras` just states that the algebra of a
commutative magma is a commutative magma. This is sufficient to let
Sage deduce that it's in fact a commutative algebra.
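In code, this deduction rule is essentially a one-liner; the nested
class ``Magmas.Commutative.Algebras`` contains, roughly (simplified
from the actual source)::

    class Algebras(AlgebrasCategory):
        def extra_super_categories(self):
            # the algebra of a commutative magma is a commutative magma
            return [Magmas().Commutative()]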
Methods for objects and elements
--------------------------------
Different objects of the same category share some algebraic features, and
very often these features can be encoded in a method, in a generic way.
For example, for every commutative additive monoid, it makes sense to ask
for the sum of a list of elements. Sage's category framework makes it
possible to provide a generic implementation for all objects of a category.
If you want to provide your new category with generic methods for
objects (or elements of objects), then you simply add a nested class
called ``ParentMethods`` (or ``ElementMethods``). The methods of that
class will automatically become methods of the objects (or the
elements). For instance::
sage: P.<x,y> = ZZ[]
sage: P.prod([x,y,2])
2*x*y
sage: P.prod.__module__
'sage.categories.monoids'
sage: P.prod.__func__ is raw_getattr(Monoids().ParentMethods, "prod")
True
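To give an idea of what such a generic implementation can look like,
here is a sketch (not the verbatim Sage source) of a ``prod`` method in
``Monoids.ParentMethods``::

    class ParentMethods:
        def prod(self, args):
            # fold the multiplication over ``args``, starting from the unit;
            # this only uses operations guaranteed by the category of monoids
            from functools import reduce
            return reduce(lambda x, y: x * y, args, self.one())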
We recommend studying the code of one example::
sage: C = CommutativeAdditiveMonoids()
sage: C?? # not tested
.. _category-primer-category-order:
On the order of super categories
--------------------------------
The generic method ``C.all_super_categories()`` determines recursively
the list of *all* super categories of `C`.
The order of the categories in this list does influence the
inheritance of methods for parents and elements. Namely, if `P` is an
object in the category `C` and if `C_1` and `C_2` are both super
categories of `C` defining some method ``foo`` in ``ParentMethods``,
then `P` will use `C_1`'s version of ``foo`` if and only if `C_1`
appears in ``C.all_super_categories()`` before `C_2`.
However this must be considered as an *implementation detail*: if
`C_1` and `C_2` are incomparable categories, then the order in which
they appear must be mathematically irrelevant: in particular, the
methods ``foo`` in `C_1` and `C_2` must have the same semantics. Code
should not rely on any specific order, as it is subject to later
change. Whenever one of the implementations is preferred in some common
subcategory of `C_1` and `C_2`, for example for efficiency reasons,
the ambiguity should be resolved explicitly by defining a
method ``foo`` in this category. See the method ``some_elements`` in
the code of the category :class:`FiniteCoxeterGroups` for an example.
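Schematically (with hypothetical category names ``C1s`` and ``C2s``
standing for `C_1` and `C_2`), such an explicit disambiguation looks
like::

    class C12s(Category):
        # a common subcategory of C1s and C2s, both of which provide ``foo``
        def super_categories(self):
            return [C1s(), C2s()]

        class ParentMethods:
            def foo(self):
                # explicitly pick the intended behavior, e.g. by
                # reimplementing it (possibly more efficiently) here
                ...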
Since :trac:`11943`, ``C.all_super_categories()`` is computed by the
so-called ``C3`` algorithm used by Python to compute the Method
Resolution Order of new-style classes. Thus the orders in
``C.all_super_categories()``, ``C.parent_class.mro()`` and
``C.element_class.mro()`` are guaranteed to be consistent.
Since :trac:`13589`, the ``C3`` algorithm is put under control of some
total order on categories. This order is not necessarily meaningful,
but it guarantees that ``C3`` always finds a consistent Method
Resolution Order. For background, see
:mod:`sage.misc.c3_controlled`. A visible effect is that the order in
which categories are specified in ``C.super_categories()``, or in a
join category, no longer influences the result of
``C.all_super_categories()``.
Subcategory hook (advanced optimization feature)
------------------------------------------------
The default implementation of the method ``C.is_subcategory(D)`` is to
look up whether `D` appears in ``C.all_super_categories()``. However,
building the list of all the super categories of `C` is an expensive
operation that is sometimes best avoided. For example, if both `C` and
`D` are categories defined over a base, but the bases differ, then one
knows right away that they cannot be subcategories of each other.
When such a shortcut is known, one can implement a method
``_subcategory_hook_``. Then, ``C.is_subcategory(D)`` first calls
``D._subcategory_hook_(C)``. If this returns ``Unknown``, then
``C.is_subcategory(D)`` tries to find ``D`` in
``C.all_super_categories()``. Otherwise, ``C.is_subcategory(D)``
returns the result of ``D._subcategory_hook_(C)``.
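As a sketch (hypothetical code, not taken verbatim from the Sage
sources), a category over a base ring could implement such a shortcut
as::

    def _subcategory_hook_(self, category):
        # categories over distinct base rings cannot be subcategories
        # of one another, so we can answer right away
        if (isinstance(category, Category_over_base_ring)
                and category.base_ring() is not self.base_ring()):
            return False
        # otherwise, fall back on the generic (expensive) lookup
        return Unknown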
By default, ``D._subcategory_hook_(C)`` tests whether
``issubclass(C.parent_class, D.parent_class)``, which very often
gives the right answer::
sage: Rings()._subcategory_hook_(Algebras(QQ))
True
sage: HopfAlgebras(QQ)._subcategory_hook_(Algebras(QQ))
False
sage: Algebras(QQ)._subcategory_hook_(HopfAlgebras(QQ))
True
"""
| 37.485119
| 117
| 0.718206
|
e5b8ecc37319f155f9c85a552de48887b2042461
| 1,849
|
py
|
Python
|
self_ade/incremental_eval.py
|
Anonymous4604/Self-ADE_SSD
|
eb4107e17721e17f2dedbdae654a43fc5d291f8c
|
[
"MIT"
] | null | null | null |
self_ade/incremental_eval.py
|
Anonymous4604/Self-ADE_SSD
|
eb4107e17721e17f2dedbdae654a43fc5d291f8c
|
[
"MIT"
] | 8
|
2020-01-28T23:08:26.000Z
|
2022-03-12T00:05:38.000Z
|
self_ade/incremental_eval.py
|
Anonymous4604/Self-ADE_SSD
|
eb4107e17721e17f2dedbdae654a43fc5d291f8c
|
[
"MIT"
] | null | null | null |
import logging
import os

import torch
from ssd.data.datasets.evaluation import evaluate
from ssd.modeling.predictor import Predictor
class IncrementalEval(object):
"""
    Computes the mAP of a model on a dataset when the predictions are not produced all at once
"""
def __init__(self, cfg, test_dataset, output_name):
main_output_dir = cfg.OUTPUT_DIR
self.output_dir = os.path.join(main_output_dir, output_name)
if not os.path.exists(self.output_dir):
            try:
                os.makedirs(self.output_dir)
            except OSError:
                logger = logging.getLogger("self_ade.eval")
                logger.info("Output dir {} exists".format(self.output_dir))
self.test_dataset = test_dataset
self.predictions = {}
self.device = torch.device(cfg.MODEL.DEVICE)
self.cpu_device = torch.device("cpu")
self.predictor = Predictor(cfg=cfg, iou_threshold=cfg.TEST.NMS_THRESHOLD, score_threshold=cfg.TEST.CONFIDENCE_THRESHOLD, device=self.device)
def add_element(self, model, sample_idx):
"""
Compute a prediction using a certain model on the image at position sample_idx in the dataset
:param model: to be used to perform prediction
:param sample_idx: to identify the image
"""
image = self.test_dataset.get_image(sample_idx, apply_transform=False)
output = self.predictor.predict(image, model=model)
boxes, labels, scores = [o.to(self.cpu_device).numpy() for o in output]
self.predictions[sample_idx] = (boxes, labels, scores)
def compute_map(self):
image_ids = list(sorted(self.predictions.keys()))
predictions = [self.predictions[i] for i in image_ids]
return evaluate(dataset=self.test_dataset, predictions=predictions, output_dir=self.output_dir)
| 42.022727
| 148
| 0.682531
|
9d62d41ff0212e3b6264eee845a7db093333827c
| 4,302
|
py
|
Python
|
src/GrafoMap.py
|
Ruframapi/Diversified-Semantic-Query-Reformulation
|
659c60393102209b3984d4d91120ecdfb32580c4
|
[
"MIT"
] | 2
|
2017-10-31T21:08:55.000Z
|
2019-11-20T05:17:09.000Z
|
src/GrafoMap.py
|
Ruframapi/Diversified-Semantic-Query-Reformulation
|
659c60393102209b3984d4d91120ecdfb32580c4
|
[
"MIT"
] | null | null | null |
src/GrafoMap.py
|
Ruframapi/Diversified-Semantic-Query-Reformulation
|
659c60393102209b3984d4d91120ecdfb32580c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 10:38:25 2016
@author: proyecto
"""
import networkx as nx
import json
class GrafoMap:
G = nx.DiGraph()
    alpha = 0.85  # PageRank damping parameter
    FileName = "Filename"
    FileOutput = "FileOut"
    PathGuadarGrafo = "Grafos/"
labelNodeDictionary = {}
jsonDictionary = {}
numNodes = 0
def __init__(self, filename = "Filename", fileoutput = "FileOut"):
""" Constructor """
self.FileName = filename
        self.FileOutput = fileoutput
def dreagrafo(self):
""" Metodo Principal para la Construccion del grafo"""
rdfc = open (self.FileName,'r')
lines = rdfc.readlines()
for line in lines:
conex=line.split("-|")
if len(conex)>0:
self.addnodeG(conex[0])
self.addnodeG(conex[1])
self.addedgeG(conex[0],conex[1],conex[2])
def addedgeG(self,nodeIni,nodeFin,edgeProperty):
if nodeIni.endswith("*"):
nodeIni=nodeIni[:-1]
if nodeFin.endswith("*"):
nodeFin=nodeFin[:-1]
nodeINIid = self.labelNodeDictionary[nodeIni]
nodeFINid = self.labelNodeDictionary[nodeFin]
edgeProperty=edgeProperty.replace('http://www.w3.org/2000/01/rdf-schema#','rdfs:')
edgeProperty=edgeProperty.replace('http://dbpedia.org/ontology/','dbo:')
edgeProperty=edgeProperty.replace('http://purl.org/dc/terms/','dct:')
edgeProperty=edgeProperty.replace("\n","")
indice = [i for i,v in enumerate(self.G.edges()) if (v[0]==nodeINIid and v[1]==nodeFINid)]
        if len(indice) > 0:  # this connection already exists
self.G[nodeINIid][nodeFINid]['weight']+=1
self.G[nodeINIid][nodeFINid]['conexpro']= edgeProperty+"-"+self.G[nodeINIid][nodeFINid]['conexpro']
else:
self.G.add_edge(nodeINIid,nodeFINid,weight=1,conexpro=edgeProperty)
def addnodeG(self,node):
"""Verifica la existencia del nodo y si es es de un path o de la busqueda libre"""
if node.endswith("*"):
path=True
node=node[:-1]
else:
path=False
        if node not in self.labelNodeDictionary:
self.numNodes+=1
self.labelNodeDictionary[node] = self.numNodes
if path:
#self.idNodeDictionary[self.numNodes] = node
self.G.add_node(self.numNodes,label=node,camino="True")
else:
self.G.add_node(self.numNodes,label=node,camino="False")
    def pageRankG(self):
        """Compute the PageRank and add it as an attribute of the nodes"""
        pr = nx.pagerank(self.G, self.alpha)
        for node, prval in pr.items():
            self.G.node[node]['pagerank'] = float("{0:.4f}".format(prval))
def guardagrafo(self):
"""Guarda el grafo en gexf y json"""
name=self.PathGuadarGrafo+self.FileOutput+".gexf"
namej=self.PathGuadarGrafo+self.FileOutput+".json"
nx.write_gexf(self.G,name)
nodes = []
edges = []
colordict = {}
colordict["True"] = "rgb(220,246,215)"
colordict["False"] = "rgb(18,126,233)"
for node,attr in self.G.nodes(data = True):
nodes.append({"label":attr['label'],"id":node,"value":attr['pagerank'],"color":colordict[attr['camino']],"attributes":{"pagerank":attr['pagerank'],"camino":attr['camino'] }})
for edge in self.G.edges(data = True):
edges.append({"source": edge[0],"target":edge[1],"value":edge[2]['weight'],"attributes":{"weight":edge[2]['weight'],"conexpro":edge[2]['conexpro']}})
self.jsonDictionary["nodes"]=nodes
self.jsonDictionary["edges"]=edges
fo=open(namej,"w")
json.dump(self.jsonDictionary,fo)
#print self.jsonDictionary
def ejecutarproceso(self):
"""Ejecuta de forma sequencial el proceso de generacion del grafo y construye un archivo output"""
self.dreagrafo()
self.pageRankG()
self.guardagrafo()
#dbpe = GrafoMap("CA003", "CA003Out")
#dbpe.ejecutarproceso()
#print dbpe.G.nodes(data=True)
| 34.416
| 186
| 0.579498
|
83c6226625096ff2ed5f3792af501ecd8752fda6
| 259
|
py
|
Python
|
docker-rm/controllers/resource/VolumeInstance.py
|
Bhaskers-Blu-Org1/osslm-docker-adaptor
|
2fcccf939da14068b53eee332d8aa05bf1396a9f
|
[
"Apache-2.0"
] | 2
|
2017-11-08T09:32:44.000Z
|
2018-01-17T16:56:05.000Z
|
docker-rm/controllers/resource/VolumeInstance.py
|
Bhaskers-Blu-Org1/osslm-docker-adaptor
|
2fcccf939da14068b53eee332d8aa05bf1396a9f
|
[
"Apache-2.0"
] | 3
|
2018-01-30T09:02:32.000Z
|
2018-10-09T08:34:49.000Z
|
docker-rm/controllers/resource/VolumeInstance.py
|
IBM/osslm-docker-adaptor
|
2fcccf939da14068b53eee332d8aa05bf1396a9f
|
[
"Apache-2.0"
] | 8
|
2017-12-14T11:15:52.000Z
|
2020-06-29T13:54:29.000Z
|
import logging
from controllers.resource.ResourceInstance import ResourceInstance
class VolumeInstance(ResourceInstance):
""" Reponsible for running resource instance lifecycles"""
def __init__(self, network):
self.logger = logging.getLogger(__name__)
| 28.777778
| 66
| 0.814672
|
412dd76825471723ddc5519398e4bb4d475553b9
| 1,063
|
py
|
Python
|
aves2_client/logger.py
|
jd-aig/aves2_client
|
a27ada4696a28c5d9b5cffe7af7e9811a4a4e20f
|
[
"Apache-2.0"
] | null | null | null |
aves2_client/logger.py
|
jd-aig/aves2_client
|
a27ada4696a28c5d9b5cffe7af7e9811a4a4e20f
|
[
"Apache-2.0"
] | null | null | null |
aves2_client/logger.py
|
jd-aig/aves2_client
|
a27ada4696a28c5d9b5cffe7af7e9811a4a4e20f
|
[
"Apache-2.0"
] | 1
|
2020-12-08T05:14:44.000Z
|
2020-12-08T05:14:44.000Z
|
# -*- coding:utf-8 -*-
import sys
import logging
def add_handler(stream, lvl, formatter):
logger = logging.getLogger("")
handler = logging.StreamHandler(stream)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(lvl)
return handler
def log2console(lvl=logging.INFO):
# formatter = logging.Formatter(fmt="%(levelname)-8s: %(message)s")
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s: %(name)s: %(message)s',
datefmt='%H:%M:%S'
)
add_handler(sys.stdout, lvl, formatter)
def log2file(logfile, lvl=logging.DEBUG):
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)-8s: %(name)s: %(message)s',
datefmt='%H:%M:%S'
)
add_handler(sys.stdout, lvl, formatter)
add_handler(open(logfile, "w"), lvl, formatter)
def add_file_handler(logger, logfile, fmt="%(message)s"):
handler = logging.FileHandler(logfile)
formatter = logging.Formatter(fmt=fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
| 28.72973
| 71
| 0.662277
|
669e7c631df964f4d3d54a7d5334f5cd4c4d527f
| 385
|
py
|
Python
|
libweasyl/libweasyl/alembic/versions/652d9ba475f0_remove_thumbnail_legacy_links.py
|
akash143143/weasyl
|
be42a2313e657e97c4a48432379e37b6a3d4a4af
|
[
"Apache-2.0"
] | 111
|
2016-05-18T04:18:18.000Z
|
2021-11-03T02:05:19.000Z
|
libweasyl/libweasyl/alembic/versions/652d9ba475f0_remove_thumbnail_legacy_links.py
|
akash143143/weasyl
|
be42a2313e657e97c4a48432379e37b6a3d4a4af
|
[
"Apache-2.0"
] | 1,103
|
2016-05-29T05:17:53.000Z
|
2022-03-31T18:12:40.000Z
|
libweasyl/libweasyl/alembic/versions/652d9ba475f0_remove_thumbnail_legacy_links.py
|
TheWug/weasyl
|
a568a542cc58c11e30621fb672c701531d4306a8
|
[
"Apache-2.0"
] | 47
|
2016-05-29T20:48:37.000Z
|
2021-11-12T09:40:40.000Z
|
"""Remove thumbnail-legacy links
Revision ID: 652d9ba475f0
Revises: cc2f96b0ba35
Create Date: 2020-03-01 23:17:41.685935
"""
# revision identifiers, used by Alembic.
revision = '652d9ba475f0'
down_revision = 'cc2f96b0ba35'
from alembic import op
def upgrade():
op.execute("DELETE FROM submission_media_links WHERE link_type = 'thumbnail-legacy'")
def downgrade():
pass
| 17.5
| 89
| 0.750649
|
fe91d09054278fc9604b1565855dd622211d30f9
| 24,435
|
py
|
Python
|
delfin/tests/unit/drivers/hitachi/hnas/constants.py
|
guankc/delfin
|
85c2ac90dba7042a9159f1d4e927020ea166f139
|
[
"Apache-2.0"
] | 4
|
2020-05-07T07:43:43.000Z
|
2020-06-21T15:25:42.000Z
|
delfin/tests/unit/drivers/hitachi/hnas/constants.py
|
guankc/delfin
|
85c2ac90dba7042a9159f1d4e927020ea166f139
|
[
"Apache-2.0"
] | 355
|
2021-03-03T03:50:00.000Z
|
2022-03-14T11:43:18.000Z
|
delfin/tests/unit/drivers/hitachi/hnas/constants.py
|
guankc/delfin
|
85c2ac90dba7042a9159f1d4e927020ea166f139
|
[
"Apache-2.0"
] | 153
|
2021-03-10T09:32:08.000Z
|
2022-02-22T01:32:32.000Z
|
# Copyright 2021 The SODA Authors.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "hitachi",
"model": "hnas",
"ssh": {
"host": "192.168.3.211",
"port": 22,
"username": "manager",
"password": "manager",
}
}
STORAGE_INFO = """\r
cluster-show\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ cluster-show\r
Overall Status = Online\r
Cluster Health = Robust\r
Cluster Mode = Not clustered\r
Cluster Name = pba-hnas-1\r
Cluster UUID = a39f815a-e582-11d6-9000-b76f3098a657\r
Cluster Size = 1\r
Node Name = pba-hnas-1-1\r
Node ID = 1\r
Cluster GenId = 1\r
Cluster Master = No\r
\r
pba-hnas-1-1:$ """
VERSION_INFO = """\r
ver\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ ver\r
\r
Model: HNAS 4060\r
\r
Software: 12.7.4221.12 (built 2016-10-28 21:51:37+01:00)\r
\r
Hardware: NAS Platform (M4SJKW1423160)\r
\r
board MMB1\r
mmb 12.7.4221.12 release (2016-10-28 21:51:37+01:00)\r
\r
board MFB2\r
mfb2hw MB v0132 WL v0132 TD v0132 FD v0132 TC v00C6 RY v00C6 \r
TY v00C6 IC v00C6 WF v007C FS v007C OS v007C WD v007C D0 v0077 \r
Serial no B1423125 (Tue Jun 17 13:38:33 2014)\r
\r
board MCP\r
Serial no B1423160 (Wed Jun 18 20:39:53 2014)\r
\r
pba-hnas-1-1:$ """
LOCATION_INFO = """\r
system-information-get\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ system-information-get\r
\r
Name: pba-hnas-1\r
Location: chengdu\r
Contact: \r
\r
pba-hnas-1-1:$ """
DISK_INFO = """\r
sd-list --scsi\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ sd-list --scsi\r
Device ID: 0\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:00\r
Blocksize: 512\r
Superflush: Default\r
Lun: 0\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0000\r
HDS dev name: 1000\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 1\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:01\r
Blocksize: 512\r
Superflush: Default\r
Lun: 1\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0400\r
HDS dev name: 1001\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 2\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:02\r
Blocksize: 512\r
Superflush: Default\r
Lun: 2\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0000\r
HDS dev name: 1002\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 3\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span1' (capacity 200GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:03\r
Blocksize: 512\r
Superflush: Default\r
Lun: 3\r
Serial number: 212902\r
Site ID: 0\r
Tier: 1\r
HDS ctrlr port: 0400\r
HDS dev name: 1003\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 4\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:04\r
Blocksize: 512\r
Superflush: Default\r
Lun: 4\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1004\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 5\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:05\r
Blocksize: 512\r
Superflush: Default\r
Lun: 5\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1005\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 6\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:06\r
Blocksize: 512\r
Superflush: Default\r
Lun: 6\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1006\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 7\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:07\r
Blocksize: 512\r
Superflush: Default\r
Lun: 7\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1007\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 8\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:08\r
Blocksize: 512\r
Superflush: Default\r
Lun: 8\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 1008\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 9\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:09\r
Blocksize: 512\r
Superflush: Default\r
Lun: 9\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 1009\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 10\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0A\r
Blocksize: 512\r
Superflush: Default\r
Lun: 10\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0400\r
HDS dev name: 100A\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
Device ID: 11\r
Comment: \r
Capacity: 50GiB (53687746560 bytes)\r
Status: OK\r
Role: Primary\r
Access: Allowed\r
Used in span: 'span2' (capacity 400GiB)\r
Type: Make: HITACHI; Model: OPEN-V; Revision: 7303\r
Submodel: HM70\r
Luid: [03:01:00]60:06:0E:80:13:32:66:00:50:20:32:66:00:00:10:0B\r
Blocksize: 512\r
Superflush: Default\r
Lun: 11\r
Serial number: 212902\r
Site ID: 0\r
Tier: None\r
HDS ctrlr port: 0000\r
HDS dev name: 100B\r
HDP pool no: 0\r
GAD: No\r
Queue depth: min 16, default 32, max 512, configured [default],
effective 32\r
\r
pba-hnas-1-1:$ """
POOL_INFO = """\r
span-list\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ span-list\r
Span instance name OK? Free Cap/GiB System drives Con\r
--------------------- --- ---- ------- ------------------------- ---\r
span1 Yes 100% 200 0,1,2,3 90%\r
Tier 0: empty: file systems can't be created or mounted\r
Tier 1: capacity 200GiB; free: 200GiB (100%); HDP pool free 996GiB\r
span2 Yes 86% 400 4,5,6,7;8,9,10,11 90%\r
pba-hnas-1-1:$ """
POOL_DETAIL_INFO = """\r
\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ span-space-distribution\r
Span span2:\r
\r
How each stripeset is used:\r
Stripeset 0:\r
18GiB 9.09% fs1\r
18GiB 9.09% fs2\r
18GiB 9.09% fs3\r
145GiB 72.74% [Free space]\r
Stripeset 1:\r
200GiB 100.00% [Free space]\r
\r
Where each filesystem resides:\r
Filesystem fs1:\r
Stripeset 0 18GiB 100.00%\r
Filesystem fs2:\r
Stripeset 0 18GiB 100.00%\r
Filesystem fs3:\r
Stripeset 0 18GiB 100.00%\r
\r
Span span1:\r
\r
How each stripeset is used:\r
Stripeset 0:\r
200GiB 100.00% [Free space]\r
\r
Where each filesystem resides:\r
\r
pba-hnas-1-1:$"""
ALERT_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ event-log-show -w -s\r
****** Current time : 2021-10-25 11:12:35+08:00 ******\r
8208 Information 2021-11-02 08:26:01+08:00 Chassis device 'md0'
is running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8462 Warning 2021-11-02 08:00:10+08:00 [ pba-hnas-1 ] The
SMU does not have an email
alert profile relating to a managed server.\r
CAUSE: An email alert profile relating to a managed
server must be applied to the SMU so that alert and diagnostic
emails can be sent to the required recipients.\r
RESOLUTION: Go to an SMTP Email Profile page and apply a
profile to the SMU.\r
\r
8208 Information 2021-11-02 04:04:01+08:00 Chassis device 'md2'
is running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8209 Information 2021-11-02 04:04:00+08:00 Chassis device 'md3'
has completed background media scan.\r
CAUSE: Chassis drive volume media check has completed.\r
RESOLUTION: No Action required.\r
\r
9995 Information 2021-11-01 20:50:36+08:00 wq test snmp.\r
CAUSE: A test event was requested.\r
RESOLUTION: No action required.\r
\r\
3303 Information 2021-11-01 19:27:22+08:00 Exceeded socket backlog:
dropping additional connection request from 127.0.0.1:34008->127.0.0.1:206:
this event, Id 3303, happened once in the last 6.25 d on the MMB1.\r
CAUSE: Socket backlogged: could not allow a new connection.\r
RESOLUTION: This is expected behavior on receiving a flurry of
connection requests. If it happens in other circumstances,
run the Performance Info Report, then report this and send the
PIR results to your support provider.\r
\r
8208 Information 2021-11-01 16:44:01+08:00 Chassis device 'md3' is
running background media scan.\r
CAUSE: Chassis drive volume is running a media check.\r
RESOLUTION: No Action required.\r
\r
8462 Warning 2021-11-01 08:00:10+08:00 [ pba-hnas-1 ] The SMU
does not have an email alert profile relating to a managed server.\r
CAUSE: An email alert profile relating to a managed server
must be applied to the SMU so that alert and diagnostic emails
can be sent to the required recipients.\r
RESOLUTION: Go to an SMTP Email Profile page and apply a profile
to the SMU.\r
****** Current time : 2021-10-25 11:12:35+08:00 ******\r
pba-hnas-1-1:$ """
TRAP_INFO = {
'1.3.6.1.4.1.11096.6.1.1':
"8462 Warning: [ pba-hnas-1 ] The SMU does not have an email alert "
"profile relating to a managed server."
}
NODE_INFO = """Linux pba-hnas-1 2.6.32-5-amd64 #1 SMP Sun Dec 21 18:
01:12 UTC 2014 x86_64\r
\r
\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ cluster-show -y\r
Ethernet Mgmnt\r
ID Node Name Status FS Access Aggs Netwrk FC EVS IDs\r
-- --------------- -------- ---------- ---------- ------ --- -------\r
1 pba-hnas-1-1 ONLINE OK Degraded OK OK [0,1,2]\r
pba-hnas-1-1:$ """
FC_PORT_INFO = """\r
fc-hports\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ fc-hports\r
\r
Host Port 1\r
Addrs: 0x1\r
Port name: 50:03:01:70:00:06:8B:01\r
Node name: 50:03:01:70:00:06:8B:00 \r
FC Link is up\r
Status : Good \r
\r
Host Port 2\r
Addrs: not assigned\r
Port name: 50:03:01:70:00:06:8B:02\r
Node name: 50:03:01:70:00:06:8B:00 \r
FC Link is down\r
\r
Host Port 3\r
Addrs: 0x1\r
Port name: 50:03:01:70:00:06:8B:03\r
Node name: 50:03:01:70:00:06:8B:00 \r
FC Link is up\r
Status : Good \r
\r
Host Port 4\r
Addrs: not assigned\r
Port name: 50:03:01:70:00:06:8B:04\r
Node name: 50:03:01:70:00:06:8B:00 \r
FC Link is down\r
\r
pba-hnas-1-1:$ """
FC_PORT_STATUS = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ fc-link-speed\r
FC 1: 8 Gbps\r
FC 2: 4 Gbps\r
FC 3: 8 Gbps\r
FC 4: 8 Gbps\r
pba-hnas-1-1:$ """
ETH_PORT_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ ifconfig\r
ag1 Link encap:1 HWaddr 00-30-17-09-fc-08\r
inet addr:192.168.0.1 Bcast:192.168.0.255 mask:255.255.255.0\r
inet addr:192.168.0.2 Bcast:192.168.0.255 mask:255.255.255.0\r
Link:DOWN Admin:UP MTU:1500 Metric:1 txqueuelen:64\r
\r
ag2 Link encap:1 HWaddr 00-30-17-09-fc-09\r
Link:DOWN Admin:DOWN MTU:1500 Metric:1 txqueuelen:64\r
\r
c1 Link encap:1 HWaddr 00-30-17-09-fc-10\r
inet addr:240.152.166.87 Bcast:240.255.255.255 mask:255.0.0.0\r
Link:DOWN Admin:UP MTU:1488 Metric:2 txqueuelen:64\r
\r
c2 Link encap:1 HWaddr 00-30-17-09-fc-11\r
Link:DOWN Admin:DOWN MTU:1488 Metric:2 txqueuelen:64\r
\r
eth0 Link encap:1 HWaddr 0c-c4-7a-05-9e-a0\r
inet addr:192.168.3.211 Bcast:192.168.3.255 mask:255.255.255.0\r
inet6 addr: fe80::ec4:7aff:fe05:9ea0/64 Scope:Link\r
Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r
\r
eth1 Link encap:1 HWaddr 0c-c4-7a-05-9e-a1\r
inet addr:192.0.2.2 Bcast:192.0.255.255 mask:255.255.0.0\r
inet addr:192.0.2.200 Bcast:192.0.255.255 mask:255.255.0.0\r
Link:DOWN Admin:UP MTU:1500 Metric:4 txqueuelen:64\r
\r
lo Link encap:1 \r
inet addr:127.0.0.1 Bcast:127.255.255.255 mask:255.0.0.0\r
inet6 addr: ::1/128 Scope:Global\r
inet6 addr: fe80::200:ff:fe00:0/64 Scope:Link\r
Link:UP Admin:UP MTU:1500 Metric:4 txqueuelen:64\r
\r
pba-hnas-1-1:$ """
FS_INFO = """\r
filesystem-list\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ filesystem-list\r
Instance name Dev On span State EVS Cap/GiB Confined Flag\r
----------------- ---- ----------- ----- --- ------- -------- ----\r
fs1 1024 span2 Mount 1 18 20 \r
pba-hnas-1-1:$ """
QTREE_INFO = """\r
evs-select 1\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ evs-select 1\r
pba-hnas-1-1[EVS1]:$ virtual-volume list --verbose fs1\r
tree1\r
email : \r
root : /12323\r
tag : 2\r
usage bytes : 0 B files: 1\r
last modified: 2021-09-23 07:18:14.714807865+00:00\r
vol2\r
email : \r
root : /123\r
tag : 1\r
usage bytes : 0 B files: 1\r
last modified: 2021-09-15 07:17:02.790323869+00:00\r
pba-hnas-1-1[EVS1]:$ """
CIFS_SHARE_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ evs-select 1\r
pba-hnas-1-1[EVS1]:$ cifs-share list\r
\r
Share name: tree1\r
Share path: \12323\r
Share users: 0\r
Share online: Yes\r
Share comment: Share associated with Virtual Volume tree1\r
Cache options: Manual local caching for documents\r
ABE enabled: No\r
Continuous Availability: No\r
Access snapshots: Yes\r
Display snapshots: Yes\r
ShadowCopy enabled: Yes\r
Lower case on create: No\r
Follow symlinks: Yes\r
Follow global symlinks: No\r
Scan for viruses: Yes\r
File system label: fs1\r
File system size: 18 GB\r
File system free space: 15.6 GB\r
File system state: \r
formatted = Yes\r
mounted = Yes\r
failed = No\r
thin provisioned = No\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
Home directories: Off\r
Mount point options:\r
\r
Share name: C$\r
Share path: \\r
Share users: 0\r
Share online: Yes\r
Share comment: Default share\r
Cache options: Manual local caching for documents\r
ABE enabled: No\r
Continuous Availability: No\r
Access snapshots: Yes\r
Display snapshots: No\r
ShadowCopy enabled: Yes\r
Lower case on create: No\r
Follow symlinks: Yes\r
Follow global symlinks: No\r
Scan for viruses: Yes\r
File system info: *** not available ***\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
Home directories: Off\r
Mount point options:\r
\r
\r
Share name: vol6\r
Share path: \666\r
Share users: 0\r
Share online: No\r
Share comment: Share associated with Virtual Volume vol6\r
Cache options: Manual local caching for documents\r
ABE enabled: No\r
Continuous Availability: No\r
Access snapshots: Yes\r
Display snapshots: Yes\r
ShadowCopy enabled: Yes\r
Lower case on create: No\r
Follow symlinks: Yes\r
Follow global symlinks: No\r
Scan for viruses: Yes\r
File system info: *** not available ***\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
Home directories: Off\r
Mount point options:\r
\r
pba-hnas-1-1[EVS1]:$ """
NFS_SHARE_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ evs-select 1\r
pba-hnas-1-1[EVS1]:$ nfs-export list\r
\r
Export name: /nfs1\r
Export path: /\r
File system label: fs1\r
File system size: 18 GB\r
File system free space: 15.6 GB\r
File system state: \r
formatted = Yes\r
mounted = Yes\r
failed = No\r
thin provisioned = No\r
Access snapshots: Yes\r
Display snapshots: Yes\r
Read Caching: Disabled\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
\r
Export configuration:\r
192.168.3.163\r
\r
\r
Export name: /vol6\r
Export path: /666\r
File system info: *** not available *** \r
Access snapshots: Yes\r
Display snapshots: Yes\r
Read Caching: Disabled\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
\r
Export configuration:\r
\r
\r
\r
Export name: /vol2\r
Export path: /123\r
File system label: fs1\r
File system size: 18 GB\r
File system free space: 15.6 GB\r
File system state: \r
formatted = Yes\r
mounted = Yes\r
failed = No\r
thin provisioned = No\r
Access snapshots: Yes\r
Display snapshots: Yes\r
Read Caching: Disabled\r
Disaster recovery setting:\r
Recovered = No\r
Transfer setting = Use file system default\r
\r
Export configuration:\r
\r
\r
pba-hnas-1-1[EVS1]:$ """
FS_DETAIL_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ df -k\r
\r
ID Label EVS Size Used Snapshots """\
+ """ Deduped Avail Thin FS Type \r
---- ----- --- ----------- ---------------- --------- """\
+ """ ------- ----------------- ---- ----- \r
1024 fs1 1 18874368 KB 2520544 KB (13%) 0 KB (0%) """\
+ """ NA 16353824 KB (87%) No 32 KB,WFS-2,128 DSBs \r
\r
pba-hnas-1-1:$ """
QUOTA_INFO = """\r
\r
HDS NAS OS Console\r
MAC ID : B7-6F-30-98-A6-57\r
\r
pba-hnas-1-1:$ evs-select 1\r
pba-hnas-1-1[EVS1]:$ quota list fs1\r
Type : Explicit\r
Target : Group: root\r
Usage : 10 GB\r
Limit : 1 GB (Soft)\r
Warning : 75% (768 MB)\r
Critical : 85% (870.4 MB)\r
Reset : 5% (51.2 MB)\r
File Count : 7\r
Limit : 213 (Soft)\r
Warning : 75% (159)\r
Critical : 85% (181)\r
Reset : 5% (10)\r
Generate Events : Disabled\r
\r
Type : Explicit\r
Target : User: root\r
Usage : 10 GB\r
Limit : 1 GB (Soft)\r
Warning : 75% (768 MB)\r
Critical : 85% (870.4 MB)\r
Reset : 5% (51.2 MB)\r
File Count : 7\r
Limit : 213 (Soft)\r
Warning : 75% (159)\r
Critical : 85% (181)\r
Reset : 5% (10)\r
Generate Events : Disabled\r
\r
Type : Explicit\r
Target : ViVol: vol2\r
Usage : 0 B\r
Limit : 1 GB (Soft)\r
Warning : 75% (768 MB)\r
Critical : 85% (870.4 MB)\r
Reset : 5% (51.2 MB)\r
File Count : 1\r
Limit : 213 (Soft)\r
Warning : 75% (159)\r
Critical : 85% (181)\r
Reset : 5% (10)\r
Generate Events : Disabled\r
\r
pba-hnas-1-1[EVS1]:$"""
| 29.054697
| 78
| 0.589482
|
96bc3b352b941f8a4e03e24f7866e3e9af0e1bb6
| 15,927
|
py
|
Python
|
docs/conf_common.py
|
Grilla-Grills/esp-idf
|
c77c4ccf6c43ab09fd89e7c907bf5cf2a3499e3b
|
[
"Apache-2.0"
] | 1
|
2020-08-05T09:24:17.000Z
|
2020-08-05T09:24:17.000Z
|
docs/conf_common.py
|
Grilla-Grills/esp-idf
|
c77c4ccf6c43ab09fd89e7c907bf5cf2a3499e3b
|
[
"Apache-2.0"
] | null | null | null |
docs/conf_common.py
|
Grilla-Grills/esp-idf
|
c77c4ccf6c43ab09fd89e7c907bf5cf2a3499e3b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Common (non-language-specific) configuration for Read The Docs & Sphinx
#
# Based on a Read the Docs Template documentation build configuration file,
# created by sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is imported from a language-specific conf.py (ie en/conf.py or
# zh_CN/conf.py)
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import os.path
import re
import subprocess
from sanitize_version import sanitize_version
from idf_extensions.util import download_file_if_missing
from get_github_rev import get_github_rev
# build_docs on the CI server sometimes fails under Python3. This is a workaround:
sys.setrecursionlimit(3500)
config_dir = os.path.abspath(os.path.dirname(__file__))
# http://stackoverflow.com/questions/12772927/specifying-an-online-image-in-sphinx-restructuredtext-format
#
suppress_warnings = ['image.nonlocal_uri']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe',
'sphinx.ext.todo',
'sphinx_idf_theme',
'sphinxcontrib.blockdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.actdiag',
'sphinxcontrib.nwdiag',
'sphinxcontrib.rackdiag',
'sphinxcontrib.packetdiag',
'extensions.html_redirects',
'extensions.toctree_filter',
'extensions.list_filter',
# Note: order is important here, events must
# be registered by one extension before they can be
# connected to another extension
'idf_extensions.include_build_file',
'idf_extensions.link_roles',
'idf_extensions.build_system',
'idf_extensions.esp_err_definitions',
'idf_extensions.gen_toolchain_links',
'idf_extensions.gen_version_specific_includes',
'idf_extensions.kconfig_reference',
'idf_extensions.gen_defines',
'idf_extensions.run_doxygen',
'idf_extensions.gen_idf_tools_links',
'idf_extensions.format_idf_target',
'idf_extensions.latex_builder',
'idf_extensions.exclude_docs',
# from https://github.com/pfalcon/sphinx_selective_exclude
'sphinx_selective_exclude.eager_only',
# TODO: determine if we need search_auto_exclude
# 'sphinx_selective_exclude.search_auto_exclude',
]
# sphinx.ext.todo extension parameters
# If the below parameter is True, the extension
# produces output, else it produces nothing.
todo_include_todos = False
# Enabling this fixes cropping of blockdiag edge labels
seqdiag_antialias = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser',
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# This is the full exact version, canonical git version description
# visible when you open index.html.
version = subprocess.check_output(['git', 'describe']).strip().decode('utf-8')
# The 'release' version is the same as 'version' for non-CI builds, but for CI
# builds on a branch it's replaced with the branch name
release = sanitize_version(version)
print('Version: {0} Release: {1}'.format(version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/inc/**', '_static/', '_build/**']
BT_DOCS = ['api-guides/blufi.rst',
'api-guides/esp-ble-mesh/**',
'api-reference/bluetooth/**']
SDMMC_DOCS = ['api-reference/peripherals/sdmmc_host.rst',
'api-reference/peripherals/sd_pullup_requirements.rst']
SDIO_SLAVE_DOCS = ['api-reference/peripherals/sdio_slave.rst']
MCPWM_DOCS = ['api-reference/peripherals/mcpwm.rst']
LEGACY_DOCS = ['api-guides/build-system-legacy.rst',
'gnu-make-legacy.rst',
'api-guides/ulp-legacy.rst',
'api-guides/unit-tests-legacy.rst',
'get-started-legacy/**']
ESP32_DOCS = ['api-guides/ulp_instruction_set.rst',
'api-reference/system/himem.rst',
'api-guides/RF_calibration.rst',
'api-reference/system/ipc.rst',
'security/secure-boot-v1.rst',
'api-reference/peripherals/secure_element.rst',
'hw-reference/esp32/**'] + LEGACY_DOCS
ESP32S2_DOCS = ['esp32s2.rst',
'hw-reference/esp32s2/**',
'api-guides/ulps2_instruction_set.rst',
'api-guides/dfu.rst',
'api-guides/usb-console.rst',
'api-guides/ulp-risc-v.rst',
'api-reference/peripherals/hmac.rst',
'api-reference/peripherals/ds.rst',
'api-reference/peripherals/spi_slave_hd.rst',
                'api-reference/peripherals/temp_sensor.rst']
# format: {tag needed to include: documents to include}, tags are parsed from sdkconfig and peripheral_caps.h headers
conditional_include_dict = {'SOC_BT_SUPPORTED':BT_DOCS,
'SOC_SDMMC_HOST_SUPPORTED':SDMMC_DOCS,
'SOC_SDIO_SLAVE_SUPPORTED':SDIO_SLAVE_DOCS,
'SOC_MCPWM_SUPPORTED':MCPWM_DOCS,
'esp32':ESP32_DOCS,
'esp32s2':ESP32S2_DOCS}
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# Extra options required by sphinx_idf_theme
project_slug = 'esp-idf'
versions_url = 'https://dl.espressif.com/dl/esp-idf/idf_versions.js'
idf_targets = ['esp32', 'esp32s2']
languages = ['en', 'zh_CN']
project_homepage = "https://github.com/espressif/esp-idf"
# -- Options for HTML output ----------------------------------------------
# Custom added feature to allow redirecting old URLs
#
# Redirects should be listed in page_redirects.txt
#
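# Illustrative sketch of the expected page_redirects.txt format (not from the
# original file; the paths below are made-up examples). Each non-comment line
# holds the old page and the new page separated by whitespace:
#
#   api-guides/old-page         api-guides/new-page
#   get-started/removed-doc     get-started/replacement-doc
#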
with open("../page_redirects.txt") as f:
lines = [re.sub(" +", " ", line.strip()) for line in f.readlines() if line.strip() != "" and not line.startswith("#")]
for line in lines: # check for well-formed entries
if len(line.split(' ')) != 2:
raise RuntimeError("Invalid line in page_redirects.txt: %s" % line)
html_redirect_pages = [tuple(line.split(' ')) for line in lines]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_idf_theme'
# context used by sphinx_idf_theme
html_context = {
"display_github": True, # Add 'Edit on Github' link instead of 'View page source'
"github_user": "espressif",
"github_repo": "esp-idf",
"github_version": get_github_rev(),
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../_static/espressif-logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_template_dir = os.path.join(config_dir, 'latex_templates')
preamble = ''
with open(os.path.join(latex_template_dir, 'preamble.tex')) as f:
preamble = f.read()
titlepage = ''
with open(os.path.join(latex_template_dir, 'titlepage.tex')) as f:
titlepage = f.read()
latex_elements = {
'papersize': 'a4paper',
# Latex figure (float) alignment
'figure_align':'htbp',
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'fncychap': '\\usepackage[Sonny]{fncychap}',
'preamble': preamble,
'maketitle': titlepage,
}
# The name of an image file (relative to this directory) to place at the bottom of
# the title page.
latex_logo = "../_static/espressif2.pdf"
latex_engine = 'xelatex'
latex_use_xindy = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Override RTD CSS theme to introduce the theme corrections
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
def setup(app):
app.add_stylesheet('theme_overrides.css')
# these two must be pushed in by build_docs.py
if "idf_target" not in app.config:
app.add_config_value('idf_target', None, 'env')
app.add_config_value('idf_targets', None, 'env')
app.add_config_value('conditional_include_dict', None, 'env')
app.add_config_value('docs_to_build', None, 'env')
# Breathe extension variables (depend on build_dir)
# note: we generate into xml_in and then copy_if_modified to xml dir
app.config.breathe_projects = {"esp32-idf": os.path.join(app.config.build_dir, "xml_in/")}
app.config.breathe_default_project = "esp32-idf"
setup_diag_font(app)
# Config values pushed by -D using the cmdline is not available when setup is called
app.connect('config-inited', setup_config_values)
app.connect('config-inited', setup_html_context)
def setup_config_values(app, config):
# Sets up global config values needed by other extensions
idf_target_title_dict = {
'esp32': 'ESP32',
'esp32s2': 'ESP32-S2'
}
app.add_config_value('idf_target_title_dict', idf_target_title_dict, 'env')
pdf_name = "esp-idf-{}-{}-{}".format(app.config.language, app.config.version, app.config.idf_target)
app.add_config_value('pdf_file', pdf_name, 'env')
def setup_html_context(app, config):
# Setup path for 'edit on github'-link
config.html_context['conf_py_path'] = "/docs/{}/".format(app.config.language)
def setup_diag_font(app):
# blockdiag and other tools require a font which supports their character set
# the font file is stored on the download server to save repo size
font_name = {
'en': 'DejaVuSans.ttf',
'zh_CN': 'NotoSansSC-Regular.otf',
}[app.config.language]
font_dir = os.path.join(config_dir, '_static')
assert os.path.exists(font_dir)
print("Downloading font file %s for %s" % (font_name, app.config.language))
download_file_if_missing('https://dl.espressif.com/dl/esp-idf/docs/_static/{}'.format(font_name), font_dir)
font_path = os.path.abspath(os.path.join(font_dir, font_name))
assert os.path.exists(font_path)
app.config.blockdiag_fontpath = font_path
app.config.seqdiag_fontpath = font_path
app.config.actdiag_fontpath = font_path
app.config.nwdiag_fontpath = font_path
app.config.rackdiag_fontpath = font_path
app.config.packetdiag_fontpath = font_path
| 34.775109
| 122
| 0.686444
|
c725c4dc6909653d7c53f22470af8578e2bc747e
| 37,559
|
py
|
Python
|
adafruit_rfm9x.py
|
lgnashold/Adafruit_CircuitPython_RFM9x
|
9350b5d151444499ff022969d9299c8a202d8b7f
|
[
"MIT"
] | null | null | null |
adafruit_rfm9x.py
|
lgnashold/Adafruit_CircuitPython_RFM9x
|
9350b5d151444499ff022969d9299c8a202d8b7f
|
[
"MIT"
] | null | null | null |
adafruit_rfm9x.py
|
lgnashold/Adafruit_CircuitPython_RFM9x
|
9350b5d151444499ff022969d9299c8a202d8b7f
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2017 Tony DiCola for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_rfm9x`
====================================================
CircuitPython module for the RFM95/6/7/8 LoRa 433/915mhz radio modules. This is
adapted from the Radiohead library RF95 code from:
http://www.airspayce.com/mikem/arduino/RadioHead/
* Author(s): Tony DiCola, Jerry Needell
"""
import time
import random
from micropython import const
import adafruit_bus_device.spi_device as spidev
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_RFM9x.git"
# Internal constants:
# Register names (FSK Mode even though we use LoRa instead, from table 85)
# Common to LORA and FSK
_RH_RF95_REG_00_FIFO = const(0x00)
_RH_RF95_REG_01_OP_MODE = const(0x01)
# FSK / OOK
_RH_RF95_REG_02_BITRATE_MSB = const(0x02)
_RH_RF95_REG_03_BITRATE_LSB = const(0x03)
# Common to both
_RH_RF95_REG_06_FRF_MSB = const(0x06)
_RH_RF95_REG_07_FRF_MID = const(0x07)
_RH_RF95_REG_08_FRF_LSB = const(0x08)
_RH_RF95_REG_09_PA_CONFIG = const(0x09)
_RH_RF95_REG_0A_PA_RAMP = const(0x0A)
_RH_RF95_REG_0B_OCP = const(0x0B)
_RH_RF95_REG_0C_LNA = const(0x0C)
# LORA
_RH_RF95_REG_0D_FIFO_ADDR_PTR = const(0x0D)
_RH_RF95_REG_0E_FIFO_TX_BASE_ADDR = const(0x0E)
_RH_RF95_REG_0F_FIFO_RX_BASE_ADDR = const(0x0F)
_RH_RF95_REG_10_FIFO_RX_CURRENT_ADDR = const(0x10)
_RH_RF95_REG_11_IRQ_FLAGS_MASK = const(0x11)
_RH_RF95_REG_12_IRQ_FLAGS = const(0x12)
_RH_RF95_REG_13_RX_NB_BYTES = const(0x13)
_RH_RF95_REG_14_RX_HEADER_CNT_VALUE_MSB = const(0x14)
_RH_RF95_REG_15_RX_HEADER_CNT_VALUE_LSB = const(0x15)
_RH_RF95_REG_16_RX_PACKET_CNT_VALUE_MSB = const(0x16)
_RH_RF95_REG_17_RX_PACKET_CNT_VALUE_LSB = const(0x17)
_RH_RF95_REG_18_MODEM_STAT = const(0x18)
_RH_RF95_REG_19_PKT_SNR_VALUE = const(0x19)
_RH_RF95_REG_1A_PKT_RSSI_VALUE = const(0x1A)
_RH_RF95_REG_1B_RSSI_VALUE = const(0x1B)
_RH_RF95_REG_1C_HOP_CHANNEL = const(0x1C)
_RH_RF95_REG_1D_MODEM_CONFIG1 = const(0x1D)
_RH_RF95_REG_1E_MODEM_CONFIG2 = const(0x1E)
_RH_RF95_REG_1F_SYMB_TIMEOUT_LSB = const(0x1F)
_RH_RF95_REG_20_PREAMBLE_MSB = const(0x20)
_RH_RF95_REG_21_PREAMBLE_LSB = const(0x21)
_RH_RF95_REG_22_PAYLOAD_LENGTH = const(0x22)
_RH_RF95_REG_23_MAX_PAYLOAD_LENGTH = const(0x23)
_RH_RF95_REG_24_HOP_PERIOD = const(0x24)
_RH_RF95_REG_25_FIFO_RX_BYTE_ADDR = const(0x25)
_RH_RF95_REG_26_MODEM_CONFIG3 = const(0x26)
_RH_RF95_REG_40_DIO_MAPPING1 = const(0x40)
_RH_RF95_REG_41_DIO_MAPPING2 = const(0x41)
_RH_RF95_REG_42_VERSION = const(0x42)
# OOK Registers
_RH_OOK_REG_11_RSSI_VALUE = const(0x11)
_RH_OOK_REG_30_PACKET_CONFIG1 = const(0x30)
_RH_OOK_REG_31_PACKET_CONFIG2 = const(0x31)
_RH_OOK_REG_32_PAYLOAD_LENGTH = const(0x32)
_RH_OOK_REG_5D_BITRATE_FRAC = const(0x5d)
_RH_OOK_REG_3E_IRQ_FLAGS1 = const(0x3e)
_RH_OOK_REG_3E_IRQ_FLAGS2 = const(0x3f)
# Common
_RH_RF95_REG_4B_TCXO = const(0x4B)
_RH_RF95_REG_4D_PA_DAC = const(0x4D)
_RH_RF95_REG_5B_FORMER_TEMP = const(0x5B)
_RH_RF95_REG_61_AGC_REF = const(0x61)
_RH_RF95_REG_62_AGC_THRESH1 = const(0x62)
_RH_RF95_REG_63_AGC_THRESH2 = const(0x63)
_RH_RF95_REG_64_AGC_THRESH3 = const(0x64)
_RH_RF95_DETECTION_OPTIMIZE = const(0x31)
_RH_RF95_DETECTION_THRESHOLD = const(0x37)
# CONSTANTS
_RH_RF95_PA_DAC_DISABLE = const(0x04)
_RH_RF95_PA_DAC_ENABLE = const(0x07)
# The crystal oscillator frequency of the module
_RH_RF95_FXOSC = 32000000.0
# The Frequency Synthesizer step = RH_RF95_FXOSC / 2^^19
_RH_RF95_FSTEP = _RH_RF95_FXOSC / 524288
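# Illustrative arithmetic (not in the original source): FSTEP = 32000000 / 524288
# = 61.03515625 Hz, so tuning to 915 MHz programs
# FRF = int(915000000 / 61.03515625) = 14991360 = 0xE4C000 across the three
# FRF registers (see frequency_mhz below).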
# RadioHead specific compatibility constants.
_RH_BROADCAST_ADDRESS = const(0xFF)
# The acknowledgement bit in the FLAGS
# The top 4 bits of the flags are reserved for RadioHead. The lower 4 bits are reserved
# for application layer use.
_RH_FLAGS_ACK = const(0x80)
_RH_FLAGS_RETRY = const(0x40)
# User facing constants:
SLEEP_MODE = 0b000
STANDBY_MODE = 0b001
FS_TX_MODE = 0b010
TX_MODE = 0b011
FS_RX_MODE = 0b100
RX_MODE = 0b101
_FSK_MODULATION = const(0x0)
_OOK_MODULATION = const(0x1)
# Disable the too many instance members warning. Pylint has no knowledge
# of the context and is merely guessing at the proper amount of members. This
# is a complex chip which requires exposing many attributes and state. Disable
# the warning to work around the error.
# pylint: disable=too-many-instance-attributes
class RFM9x:
"""Interface to a RFM95/6/7/8 LoRa radio module. Allows sending and
    receiving bytes of data in long range LoRa mode at a supported board frequency
(433/915mhz).
You must specify the following parameters:
- spi: The SPI bus connected to the radio.
- cs: The CS pin DigitalInOut connected to the radio.
- reset: The reset/RST pin DigialInOut connected to the radio.
- frequency: The frequency (in mhz) of the radio module (433/915mhz typically).
You can optionally specify:
- preamble_length: The length in bytes of the packet preamble (default 8).
- high_power: Boolean to indicate a high power board (RFM95, etc.). Default
is True for high power.
    - baudrate: Baud rate of the SPI connection, default is 5mhz but you might
      choose to lower to 1mhz if using long wires or a breadboard.
Remember this library makes a best effort at receiving packets with pure
Python code. Trying to receive packets too quickly will result in lost data
so limit yourself to simple scenarios of sending and receiving single
packets at a time.
Also note this library tries to be compatible with raw RadioHead Arduino
library communication. This means the library sets up the radio modulation
to match RadioHead's defaults and assumes that each packet contains a
4 byte header compatible with RadioHead's implementation.
Advanced RadioHead features like address/node specific packets
    or "reliable datagram" delivery are supported; however, due to the
    limitations noted above, "reliable datagram" delivery is still subject to missed
    packets, but with it the sender is notified if a packet has potentially been missed.
"""
# Global buffer for SPI commands
_BUFFER = bytearray(4)
class _RegisterBits:
        # Class to simplify access to the many configuration bits available
# on the chip's registers. This is a subclass here instead of using
# a higher level module to increase the efficiency of memory usage
# (all of the instances of this bit class will share the same buffer
        # used by the parent RFM9x class instance vs. each having their own
# buffer and taking too much memory).
# Quirk of pylint that it requires public methods for a class. This
        # is a descriptor class in Python and by design it has no public methods.
# Instead it uses dunder accessors like get and set below. For some
# reason pylint can't figure this out so disable the check.
# pylint: disable=too-few-public-methods
# Again pylint fails to see the true intent of this code and warns
# against private access by calling the write and read functions below.
# This is by design as this is an internally used class. Disable the
# check from pylint.
# pylint: disable=protected-access
def __init__(self, address, *, offset=0, bits=1):
assert 0 <= offset <= 7
assert 1 <= bits <= 8
assert (offset + bits) <= 8
self._address = address
self._mask = 0
for _ in range(bits):
self._mask <<= 1
self._mask |= 1
self._mask <<= offset
self._offset = offset
def __get__(self, obj, objtype):
reg_value = obj._read_u8(self._address)
return (reg_value & self._mask) >> self._offset
def __set__(self, obj, val):
reg_value = obj._read_u8(self._address)
reg_value &= ~self._mask
reg_value |= (val & 0xFF) << self._offset
obj._write_u8(self._address, reg_value)
operation_mode = _RegisterBits(_RH_RF95_REG_01_OP_MODE, bits=3)
low_frequency_mode = _RegisterBits(_RH_RF95_REG_01_OP_MODE, offset=3, bits=1)
modulation_type = _RegisterBits(_RH_RF95_REG_01_OP_MODE, offset=5, bits=2)
# Long range/LoRa mode can only be set in sleep mode!
long_range_mode = _RegisterBits(_RH_RF95_REG_01_OP_MODE, offset=7, bits=1)
output_power = _RegisterBits(_RH_RF95_REG_09_PA_CONFIG, bits=4)
max_power = _RegisterBits(_RH_RF95_REG_09_PA_CONFIG, offset=4, bits=3)
pa_select = _RegisterBits(_RH_RF95_REG_09_PA_CONFIG, offset=7, bits=1)
pa_dac = _RegisterBits(_RH_RF95_REG_4D_PA_DAC, bits=3)
dio0_mapping = _RegisterBits(_RH_RF95_REG_40_DIO_MAPPING1, offset=6, bits=2)
bw_bins = (7800, 10400, 15600, 20800, 31250, 41700, 62500, 125000, 250000)
# FSK / OOK Specific
packet_format = _RegisterBits(_RH_OOK_REG_30_PACKET_CONFIG1, offset=7, bits=1)
crc_enabled = _RegisterBits(_RH_OOK_REG_30_PACKET_CONFIG1, offset=4, bits=1)
data_mode = _RegisterBits(_RH_OOK_REG_31_PACKET_CONFIG2, offset=6, bits=1)
payload_length_msb = _RegisterBits(_RH_OOK_REG_31_PACKET_CONFIG2, bits=2)
def __init__(
self,
spi,
cs,
reset,
frequency,
*,
lora_mode=True,
preamble_length=8,
high_power=True,
baudrate=5000000
):
self.lora_mode = lora_mode
self.high_power = high_power
        # Device supports SPI mode 0 (polarity & phase = 0) up to a max of 10mhz.
# Set Default Baudrate to 5MHz to avoid problems
self._device = spidev.SPIDevice(spi, cs, baudrate=baudrate, polarity=0, phase=0)
# Setup reset as a digital output - initially High
# This line is pulled low as an output quickly to trigger a reset.
self._reset = reset
# initialize Reset High
self._reset.switch_to_output(value=True)
self.reset()
# No device type check! Catch an error from the very first request and
# throw a nicer message to indicate possible wiring problems.
version = self._read_u8(_RH_RF95_REG_42_VERSION)
if version != 18:
raise RuntimeError(
"Failed to find rfm9x with expected version -- check wiring"
)
        # Set sleep mode, wait 10 ms and confirm in sleep mode (basic device check).
# Also set long range mode (LoRa mode) as it can only be done in sleep.
self.sleep()
time.sleep(0.01)
if self.lora_mode:
self.long_range_mode = True
if self.operation_mode != SLEEP_MODE or not self.long_range_mode:
raise RuntimeError("Failed to configure radio for LoRa mode, check wiring!")
else:
# Set to FSK / OOK mode
self.long_range_mode = False
            if self.operation_mode != SLEEP_MODE or self.long_range_mode:
raise RuntimeError("Failed to configure radio for OOK mode, check wiring!")
self.modulation_type = _OOK_MODULATION
# clear default setting for access to LF registers if frequency > 525MHz
if frequency > 525:
self.low_frequency_mode = 0
if self.lora_mode:
# Setup entire 256 byte FIFO
self._write_u8(_RH_RF95_REG_0E_FIFO_TX_BASE_ADDR, 0x00)
self._write_u8(_RH_RF95_REG_0F_FIFO_RX_BASE_ADDR, 0x00)
# We can't configure base addr's in FSK / OOK mode
# Set mode idle
self.idle()
# Set frequency
self.frequency_mhz = frequency
# Set preamble length (default 8 bytes to match radiohead).
self.preamble_length = preamble_length
# Defaults set modem config to RadioHead compatible Bw125Cr45Sf128 mode.
self.signal_bandwidth = 125000
self.coding_rate = 5
self.spreading_factor = 7
# Default to disable CRC checking on incoming packets.
self.enable_crc = False
# Note no sync word is set for LoRa mode either!
# This is the default value?
# self._write_u8(_RH_RF95_REG_26_MODEM_CONFIG3, 0x00) # Preamble lsb?
# Set transmit power to 13 dBm, a safe value any module supports.
self.tx_power = 13
# initialize last RSSI reading
self.last_rssi = 0.0
"""The RSSI of the last received packet. Stored when the packet was received.
This instantaneous RSSI value may not be accurate once the
operating mode has been changed.
"""
        # initialize timeouts and delays
self.ack_wait = 0.5
"""The delay time before attempting a retry after not receiving an ACK"""
self.receive_timeout = 0.5
"""The amount of time to poll for a received packet.
If no packet is received, the returned packet will be None
"""
self.xmit_timeout = 2.0
"""The amount of time to wait for the HW to transmit the packet.
This is mainly used to prevent a hang due to a HW issue
"""
self.ack_retries = 5
"""The number of ACK retries before reporting a failure."""
self.ack_delay = None
"""The delay time before attemting to send an ACK.
If ACKs are being missed try setting this to .1 or .2.
"""
        # initialize sequence number counter for reliable datagram mode
self.sequence_number = 0
# create seen Ids list
self.seen_ids = bytearray(256)
# initialize packet header
# node address - default is broadcast
self.node = _RH_BROADCAST_ADDRESS
"""The default address of this Node. (0-255).
        If not 255 (0xff) then only packets addressed to this node will be accepted.
First byte of the RadioHead header.
"""
# destination address - default is broadcast
self.destination = _RH_BROADCAST_ADDRESS
"""The default destination address for packet transmissions. (0-255).
If 255 (0xff) then any receiving node should accept the packet.
Second byte of the RadioHead header.
"""
# ID - contains seq count for reliable datagram mode
self.identifier = 0
"""Automatically set to the sequence number when send_with_ack() used.
Third byte of the RadioHead header.
"""
        # flags - identifies ack/retry packet for reliable datagram mode
self.flags = 0
"""Upper 4 bits reserved for use by Reliable Datagram Mode.
Lower 4 bits may be used to pass information.
Fourth byte of the RadioHead header.
"""
self.crc_error_count = 0
# pylint: disable=no-member
# Reconsider pylint: disable when this can be tested
def _read_into(self, address, buf, length=None):
# Read a number of bytes from the specified address into the provided
# buffer. If length is not specified (the default) the entire buffer
# will be filled.
if length is None:
length = len(buf)
with self._device as device:
self._BUFFER[0] = address & 0x7F # Strip out top bit to set 0
# value (read).
device.write(self._BUFFER, end=1)
device.readinto(buf, end=length)
def _read_u8(self, address):
# Read a single byte from the provided address and return it.
self._read_into(address, self._BUFFER, length=1)
return self._BUFFER[0]
def _write_from(self, address, buf, length=None):
# Write a number of bytes to the provided address and taken from the
# provided buffer. If no length is specified (the default) the entire
# buffer is written.
if length is None:
length = len(buf)
with self._device as device:
self._BUFFER[0] = (address | 0x80) & 0xFF # Set top bit to 1 to
# indicate a write.
device.write(self._BUFFER, end=1)
device.write(buf, end=length)
def _write_u8(self, address, val):
# Write a byte register to the chip. Specify the 7-bit address and the
# 8-bit value to write to that address.
with self._device as device:
self._BUFFER[0] = (address | 0x80) & 0xFF # Set top bit to 1 to
# indicate a write.
self._BUFFER[1] = val & 0xFF
device.write(self._BUFFER, end=2)
def reset(self):
"""Perform a reset of the chip."""
# See section 7.2.2 of the datasheet for reset description.
self._reset.value = False # Set Reset Low
time.sleep(0.0001) # 100 us
self._reset.value = True # set Reset High
time.sleep(0.005) # 5 ms
def idle(self):
"""Enter idle standby mode."""
self.operation_mode = STANDBY_MODE
def sleep(self):
"""Enter sleep mode."""
self.operation_mode = SLEEP_MODE
def listen(self):
"""Listen for packets to be received by the chip. Use :py:func:`receive`
to listen, wait and retrieve packets as they're available.
"""
self.operation_mode = RX_MODE
self.dio0_mapping = 0b00 # Interrupt on rx done.
def transmit(self):
"""Transmit a packet which is queued in the FIFO. This is a low level
function for entering transmit mode and more. For generating and
transmitting a packet of data use :py:func:`send` instead.
"""
self.operation_mode = TX_MODE
self.dio0_mapping = 0b01 # Interrupt on tx done.
@property
def preamble_length(self):
"""The length of the preamble for sent and received packets, an unsigned
16-bit value. Received packets must match this length or they are
ignored! Set to 8 to match the RadioHead RFM95 library.
"""
msb = self._read_u8(_RH_RF95_REG_20_PREAMBLE_MSB)
lsb = self._read_u8(_RH_RF95_REG_21_PREAMBLE_LSB)
return ((msb << 8) | lsb) & 0xFFFF
@preamble_length.setter
def preamble_length(self, val):
assert 0 <= val <= 65535
self._write_u8(_RH_RF95_REG_20_PREAMBLE_MSB, (val >> 8) & 0xFF)
self._write_u8(_RH_RF95_REG_21_PREAMBLE_LSB, val & 0xFF)
@property
def frequency_mhz(self):
"""The frequency of the radio in Megahertz. Only the allowed values for
your radio must be specified (i.e. 433 vs. 915 mhz)!
"""
msb = self._read_u8(_RH_RF95_REG_06_FRF_MSB)
mid = self._read_u8(_RH_RF95_REG_07_FRF_MID)
lsb = self._read_u8(_RH_RF95_REG_08_FRF_LSB)
frf = ((msb << 16) | (mid << 8) | lsb) & 0xFFFFFF
frequency = (frf * _RH_RF95_FSTEP) / 1000000.0
return frequency
@frequency_mhz.setter
def frequency_mhz(self, val):
if val < 240 or val > 960:
raise RuntimeError("frequency_mhz must be between 240 and 960")
# Calculate FRF register 24-bit value.
frf = int((val * 1000000.0) / _RH_RF95_FSTEP) & 0xFFFFFF
# Extract byte values and update registers.
msb = frf >> 16
mid = (frf >> 8) & 0xFF
lsb = frf & 0xFF
self._write_u8(_RH_RF95_REG_06_FRF_MSB, msb)
self._write_u8(_RH_RF95_REG_07_FRF_MID, mid)
self._write_u8(_RH_RF95_REG_08_FRF_LSB, lsb)
@property
def tx_power(self):
"""The transmit power in dBm. Can be set to a value from 5 to 23 for
high power devices (RFM95/96/97/98, high_power=True) or -1 to 14 for low
power devices. Only integer power levels are actually set (i.e. 12.5
will result in a value of 12 dBm).
The actual maximum setting for high_power=True is 20dBm but for values > 20
the PA_BOOST will be enabled resulting in an additional gain of 3dBm.
The actual setting is reduced by 3dBm.
The reported value will reflect the reduced setting.
"""
if self.high_power:
return self.output_power + 5
return self.output_power - 1
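    # Illustrative worked example (not in the original source): with
    # high_power=True, requesting tx_power = 23 enables the PA DAC, the setter
    # programs 23 - 3 = 20 dBm (output_power = 15), and reading the property
    # back reports 20.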
@tx_power.setter
def tx_power(self, val):
# Same for both
val = int(val)
if self.high_power:
if val < 5 or val > 23:
raise RuntimeError("tx_power must be between 5 and 23")
# Enable power amp DAC if power is above 20 dB.
# Lower setting by 3db when PA_BOOST enabled - see Data Sheet Section 6.4
if val > 20:
self.pa_dac = _RH_RF95_PA_DAC_ENABLE
val -= 3
else:
self.pa_dac = _RH_RF95_PA_DAC_DISABLE
self.pa_select = True
self.output_power = (val - 5) & 0x0F
else:
assert -1 <= val <= 14
self.pa_select = False
self.max_power = 0b111 # Allow max power output.
self.output_power = (val + 1) & 0x0F
@property
def rssi(self):
"""The received strength indicator (in dBm) of the last received message."""
# Read RSSI register and convert to value using formula in datasheet.
# Remember in LoRa mode the payload register changes function to RSSI!
if self.lora_mode:
return self._read_u8(_RH_RF95_REG_1A_PKT_RSSI_VALUE) - 137
else:
# In DBM, for OOK / FSK
return - self._read_u8(_RH_OOK_REG_11_RSSI_VALUE) / 2
@property
def signal_bandwidth(self):
"""The signal bandwidth used by the radio (try setting to a higher
value to increase throughput or to a lower value to increase the
likelihood of successfully received payloads). Valid values are
listed in RFM9x.bw_bins. Only used with LoRa"""
bw_id = (self._read_u8(_RH_RF95_REG_1D_MODEM_CONFIG1) & 0xF0) >> 4
if bw_id >= len(self.bw_bins):
current_bandwidth = 500000
else:
current_bandwidth = self.bw_bins[bw_id]
return current_bandwidth
@signal_bandwidth.setter
def signal_bandwidth(self, val):
# Set signal bandwidth (set to 125000 to match RadioHead Bw125).
for bw_id, cutoff in enumerate(self.bw_bins):
if val <= cutoff:
break
else:
bw_id = 9
self._write_u8(
_RH_RF95_REG_1D_MODEM_CONFIG1,
(self._read_u8(_RH_RF95_REG_1D_MODEM_CONFIG1) & 0x0F) | (bw_id << 4),
)
@property
def bitrate(self):
"""The bitrate of the radio, in OOK / FSK mode."""
msb = self._read_u8(_RH_RF95_REG_02_BITRATE_MSB) << 8
lsb = self._read_u8(_RH_RF95_REG_03_BITRATE_LSB)
        # Frac has no effect in OOK mode
return _RH_RF95_FXOSC / (msb + lsb)
@bitrate.setter
def bitrate(self, bitrate):
# TODO: Check if an acceptable value
reg_value = _RH_RF95_FXOSC // bitrate
self._write_u8(_RH_RF95_REG_02_BITRATE_MSB, (reg_value >> 8) & 0xFF)
self._write_u8(_RH_RF95_REG_03_BITRATE_LSB, reg_value & 0xFF)
@property
def coding_rate(self):
"""The coding rate used by the radio to control forward error
correction (try setting to a higher value to increase tolerance of
short bursts of interference or to a lower value to increase bit
rate). Valid values are limited to 5, 6, 7, or 8."""
cr_id = (self._read_u8(_RH_RF95_REG_1D_MODEM_CONFIG1) & 0x0E) >> 1
denominator = cr_id + 4
return denominator
@coding_rate.setter
def coding_rate(self, val):
# Set coding rate (set to 5 to match RadioHead Cr45). Only works for Lora.
denominator = min(max(val, 5), 8)
cr_id = denominator - 4
self._write_u8(
_RH_RF95_REG_1D_MODEM_CONFIG1,
(self._read_u8(_RH_RF95_REG_1D_MODEM_CONFIG1) & 0xF1) | (cr_id << 1),
)
@property
def spreading_factor(self):
"""The spreading factor used by the radio (try setting to a higher
value to increase the receiver's ability to distinguish signal from
noise or to a lower value to increase the data transmission rate).
Valid values are limited to 6, 7, 8, 9, 10, 11, or 12."""
sf_id = (self._read_u8(_RH_RF95_REG_1E_MODEM_CONFIG2) & 0xF0) >> 4
return sf_id
@spreading_factor.setter
def spreading_factor(self, val):
# Set spreading factor (set to 7 to match RadioHead Sf128).
val = min(max(val, 6), 12)
self._write_u8(_RH_RF95_DETECTION_OPTIMIZE, 0xC5 if val == 6 else 0xC3)
self._write_u8(_RH_RF95_DETECTION_THRESHOLD, 0x0C if val == 6 else 0x0A)
self._write_u8(
_RH_RF95_REG_1E_MODEM_CONFIG2,
(
(self._read_u8(_RH_RF95_REG_1E_MODEM_CONFIG2) & 0x0F)
| ((val << 4) & 0xF0)
),
)
@property
def enable_crc(self):
"""Set to True to enable hardware CRC checking of incoming packets.
Incoming packets that fail the CRC check are not processed. Set to
False to disable CRC checking and process all incoming packets."""
return (self._read_u8(_RH_RF95_REG_1E_MODEM_CONFIG2) & 0x04) == 0x04
@enable_crc.setter
def enable_crc(self, val):
# Optionally enable CRC checking on incoming packets.
if val:
self._write_u8(
_RH_RF95_REG_1E_MODEM_CONFIG2,
self._read_u8(_RH_RF95_REG_1E_MODEM_CONFIG2) | 0x04,
)
else:
self._write_u8(
_RH_RF95_REG_1E_MODEM_CONFIG2,
self._read_u8(_RH_RF95_REG_1E_MODEM_CONFIG2) & 0xFB,
)
def tx_done(self):
"""Transmit status"""
if self.lora_mode:
return (self._read_u8(_RH_RF95_REG_12_IRQ_FLAGS) & 0x8) >> 3
else:
return (self._read_u8(_RH_OOK_REG_3E_IRQ_FLAGS1) & 0x20) >> 5
def rx_done(self):
"""Receive status"""
if self.lora_mode:
return (self._read_u8(_RH_RF95_REG_12_IRQ_FLAGS) & 0x40) >> 6
else:
return (self._read_u8(_RH_OOK_REG_3E_IRQ_FLAGS1) & 0x40) >> 6
def _clear_interrupt_flags(self):
if self.lora_mode:
self._write_u8(_RH_RF95_REG_12_IRQ_FLAGS, 0xFF)
else:
self._write_u8(_RH_OOK_REG_3E_IRQ_FLAGS1, 0xFF)
def crc_error(self):
"""crc status"""
# TODO: Implement for FSK / OOK
return (self._read_u8(_RH_RF95_REG_12_IRQ_FLAGS) & 0x20) >> 5
def send(
self,
data,
*,
keep_listening=False,
destination=None,
node=None,
identifier=None,
flags=None
):
"""Send a string of data using the transmitter.
You can only send 252 bytes at a time
(limited by chip's FIFO size and appended headers).
This appends a 4 byte header to be compatible with the RadioHead library.
The header defaults to using the initialized attributes:
(destination,node,identifier,flags)
        It may be temporarily overridden via the kwargs - destination, node, identifier, flags.
Values passed via kwargs do not alter the attribute settings.
The keep_listening argument should be set to True if you want to start listening
automatically after the packet is sent. The default setting is False.
Returns: True if success or False if the send timed out.
"""
# Disable pylint warning to not use length as a check for zero.
# This is a puzzling warning as the below code is clearly the most
# efficient and proper way to ensure a precondition that the provided
# buffer be within an expected range of bounds. Disable this check.
# pylint: disable=len-as-condition
assert 0 < len(data) <= 252
# pylint: enable=len-as-condition
self.idle() # Stop receiving to clear FIFO and keep it clear.
# Fill the FIFO with a packet to send.
self._write_u8(_RH_RF95_REG_0D_FIFO_ADDR_PTR, 0x00) # FIFO starts at 0.
# Combine header and data to form payload
payload = bytearray(4)
if destination is None: # use attribute
payload[0] = self.destination
else: # use kwarg
payload[0] = destination
if node is None: # use attribute
payload[1] = self.node
else: # use kwarg
payload[1] = node
if identifier is None: # use attribute
payload[2] = self.identifier
else: # use kwarg
payload[2] = identifier
if flags is None: # use attribute
payload[3] = self.flags
else: # use kwarg
payload[3] = flags
payload = payload + data
# Write payload.
self._write_from(_RH_RF95_REG_00_FIFO, payload)
# Write length
if self.lora_mode:
self._write_u8(_RH_RF95_REG_22_PAYLOAD_LENGTH, len(payload))
# In unlimited mode for FSK, we don't need to explicitly set packet length
# In fixed mode, we only set the length once
# Turn on transmit mode to send out the packet.
self.transmit()
# Wait for tx done interrupt with explicit polling (not ideal but
# best that can be done right now without interrupts).
start = time.monotonic()
timed_out = False
while not timed_out and not self.tx_done():
if (time.monotonic() - start) >= self.xmit_timeout:
timed_out = True
# Listen again if necessary and return the result packet.
if keep_listening:
self.listen()
else:
# Enter idle mode to stop receiving other packets.
self.idle()
self._clear_interrupt_flags()
return not timed_out
def send_with_ack(self, data):
"""Reliable Datagram mode:
Send a packet with data and wait for an ACK response.
The packet header is automatically generated.
If enabled, the packet transmission will be retried on failure
"""
if self.ack_retries:
retries_remaining = self.ack_retries
else:
retries_remaining = 1
got_ack = False
self.sequence_number = (self.sequence_number + 1) & 0xFF
while not got_ack and retries_remaining:
self.identifier = self.sequence_number
self.send(data, keep_listening=True)
# Don't look for ACK from Broadcast message
if self.destination == _RH_BROADCAST_ADDRESS:
got_ack = True
else:
# wait for a packet from our destination
ack_packet = self.receive(timeout=self.ack_wait, with_header=True)
if ack_packet is not None:
if ack_packet[3] & _RH_FLAGS_ACK:
# check the ID
if ack_packet[2] == self.identifier:
got_ack = True
break
# pause before next retry -- random delay
if not got_ack:
# delay by random amount before next try
time.sleep(self.ack_wait + self.ack_wait * random.random())
retries_remaining = retries_remaining - 1
# set retry flag in packet header
self.flags |= _RH_FLAGS_RETRY
self.flags = 0 # clear flags
return got_ack
# pylint: disable=too-many-branches
def receive(
self, *, keep_listening=True, with_header=False, with_ack=False, timeout=None
):
"""Wait to receive a packet from the receiver. If a packet is found the payload bytes
are returned, otherwise None is returned (which indicates the timeout elapsed with no
reception).
If keep_listening is True (the default) the chip will immediately enter listening mode
after reception of a packet, otherwise it will fall back to idle mode and ignore any
future reception.
        All packets must have a 4-byte header for compatibility with the
RadioHead library.
The header consists of 4 bytes (To,From,ID,Flags). The default setting will strip
the header before returning the packet to the caller.
If with_header is True then the 4 byte header will be returned with the packet.
The payload then begins at packet[4].
If with_ack is True, send an ACK after receipt (Reliable Datagram mode)
"""
timed_out = False
if timeout is None:
timeout = self.receive_timeout
if timeout is not None:
# Wait for the payload_ready signal. This is not ideal and will
# surely miss or overflow the FIFO when packets aren't read fast
# enough, however it's the best that can be done from Python without
            # interrupt support.
# Make sure we are listening for packets.
self.listen()
start = time.monotonic()
timed_out = False
while not timed_out and not self.rx_done():
if (time.monotonic() - start) >= timeout:
timed_out = True
# Payload ready is set, a packet is in the FIFO.
packet = None
# save last RSSI reading
self.last_rssi = self.rssi
# Enter idle mode to stop receiving other packets.
self.idle()
if not timed_out:
if self.enable_crc and self.crc_error():
self.crc_error_count += 1
else:
# Read the data from the FIFO.
# Read the length of the FIFO.
fifo_length = self._read_u8(_RH_RF95_REG_13_RX_NB_BYTES)
# Handle if the received packet is too small to include the 4 byte
# RadioHead header and at least one byte of data --reject this packet and ignore it.
if fifo_length > 0: # read and clear the FIFO if anything in it
current_addr = self._read_u8(_RH_RF95_REG_10_FIFO_RX_CURRENT_ADDR)
self._write_u8(_RH_RF95_REG_0D_FIFO_ADDR_PTR, current_addr)
packet = bytearray(fifo_length)
# Read the packet.
self._read_into(_RH_RF95_REG_00_FIFO, packet)
# Clear interrupt.
self._write_u8(_RH_RF95_REG_12_IRQ_FLAGS, 0xFF)
if fifo_length < 5:
packet = None
else:
if (
self.node != _RH_BROADCAST_ADDRESS
and packet[0] != _RH_BROADCAST_ADDRESS
and packet[0] != self.node
):
packet = None
# send ACK unless this was an ACK or a broadcast
elif (
with_ack
and ((packet[3] & _RH_FLAGS_ACK) == 0)
and (packet[0] != _RH_BROADCAST_ADDRESS)
):
# delay before sending Ack to give receiver a chance to get ready
if self.ack_delay is not None:
time.sleep(self.ack_delay)
# send ACK packet to sender (data is b'!')
self.send(
b"!",
destination=packet[1],
node=packet[0],
identifier=packet[2],
flags=(packet[3] | _RH_FLAGS_ACK),
)
                    # reject retries if we have seen this identifier from this source before
if (self.seen_ids[packet[1]] == packet[2]) and (
packet[3] & _RH_FLAGS_RETRY
):
packet = None
else: # save the packet identifier for this source
self.seen_ids[packet[1]] = packet[2]
if (
not with_header and packet is not None
): # skip the header if not wanted
packet = packet[4:]
# Listen again if necessary and return the result packet.
if keep_listening:
self.listen()
else:
# Enter idle mode to stop receiving other packets.
self.idle()
self._clear_interrupt_flags()
return packet
| 41.918527
| 100
| 0.637371
|
27058b21a545945730e445f15035cd0a4b75aefd
| 8,262
|
py
|
Python
|
redis_cache/rediscache.py
|
fjsj/redis-simple-cache
|
95e2db870bb26121476c2a3dc2714b1ebad12152
|
[
"BSD-3-Clause"
] | null | null | null |
redis_cache/rediscache.py
|
fjsj/redis-simple-cache
|
95e2db870bb26121476c2a3dc2714b1ebad12152
|
[
"BSD-3-Clause"
] | 1
|
2016-02-22T07:39:12.000Z
|
2016-02-22T07:45:54.000Z
|
redis_cache/rediscache.py
|
fjsj/redis-simple-cache
|
95e2db870bb26121476c2a3dc2714b1ebad12152
|
[
"BSD-3-Clause"
] | null | null | null |
"""
A simple redis-cache interface for storing python objects.
"""
from functools import wraps
import pickle
import json
import base64
import hashlib
import redis
import logging
class RedisConnect(object):
'''
A simple object to store and pass database connection information.
This makes the Simple Cache class a little more flexible, for cases
where redis connection configuration needs customizing.
'''
def __init__(self, host=None, port=None, db=None, url=None):
self.host = host if host else 'localhost'
self.port = port if port else 6379
self.db = db if db else 0
self.url = url if url else None
def connect(self):
'''
We cannot assume that connection will succeed, as such we use a ping()
method in the redis client library to validate ability to contact redis.
RedisNoConnException is raised if we fail to ping.
'''
try:
if self.url is None:
redis.StrictRedis(host=self.host, port=self.port).ping()
else:
redis.StrictRedis.from_url(self.url).ping()
except redis.ConnectionError as e:
raise RedisNoConnException, ("Failed to create connection to redis",
(self.host,
self.port)
)
if self.url is None:
return redis.StrictRedis(host=self.host, port=self.port, db=self.db)
else:
return redis.StrictRedis.from_url(self.url)
class CacheMissException(Exception):
pass
class ExpiredKeyException(Exception):
pass
class RedisNoConnException(Exception):
pass
class SimpleCache(object):
def __init__(self, limit=1000, expire=60 * 60 * 24,
hashkeys=False, host=None, port=None, db=None, url=None):
self.limit = limit # No of json encoded strings to cache
        self.expire = expire  # Time for keys to expire, in seconds
## database number, host and port are optional, but passing them to
## RedisConnect object is best accomplished via optional arguments to
## the __init__ function upon instantiation of the class, instead of
        ## storing them in the class definition. Passing in None, which is
        ## already the default for the database, host or port, will just assume
        ## use of the Redis defaults.
self.host = host
self.port = port
self.db = db
self.url = url
        ## We cannot assume that the connection will always succeed. A try/except
        ## clause ensures that an unhandled exception does not result.
try:
            self.connection = RedisConnect(host=self.host, port=self.port, db=self.db, url=self.url).connect()
except RedisNoConnException, e:
self.connection = None
pass
## There may be instances where we want to create hashes for
## keys to have a consistent length.
self.hashkeys = hashkeys
def make_key(self, key):
return "SimpleCache-%s::%s" % (id(self), key)
def get_set_name(self):
return "SimpleCache-%s-keys" % id(self)
def store(self, key, value, expire=None):
""" Stores a value after checking for space constraints and freeing up space if required """
key = to_unicode(key)
value = to_unicode(value)
set_name = self.get_set_name()
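        ## When the cache is full, spop() below evicts a randomly chosen tracked
        ## key, so eviction order is arbitrary rather than LRU.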
while self.connection.scard(set_name) >= self.limit:
del_key = self.connection.spop(set_name)
self.connection.delete(self.make_key(del_key))
pipe = self.connection.pipeline()
if expire is None:
expire = self.expire
pipe.setex(self.make_key(key), expire, value)
pipe.sadd(set_name, key)
pipe.execute()
def store_json(self, key, value):
self.store(key, json.dumps(value))
def store_pickle(self, key, value):
self.store(key, base64.b64encode(pickle.dumps(value)))
def get(self, key):
key = to_unicode(key)
if key in self:
val = self.connection.get(self.make_key(key))
if val is None: # expired key
self.connection.srem(self.get_set_name(), key)
raise ExpiredKeyException
else:
return val
raise CacheMissException
def get_json(self, key):
return json.loads(self.get(key))
def get_pickle(self, key):
return pickle.loads(base64.b64decode(self.get(key)))
def __contains__(self, key):
return self.connection.sismember(self.get_set_name(), key)
def __len__(self):
return self.connection.scard(self.get_set_name())
def keys(self):
return self.connection.smembers(self.get_set_name())
def flush(self):
keys = self.keys()
pipe = self.connection.pipeline()
for del_key in keys:
pipe.delete(self.make_key(del_key))
pipe.delete(self.get_set_name())
pipe.execute()
def cache_it(limit=1000, expire=60 * 60 * 24, cache=None):
"""
Apply this decorator to cache any function returning a value. Arguments and function result
must be pickleable.
"""
cache_ = cache ## Since python 2.x doesn't have the nonlocal keyword, we need to do this
def decorator(function):
cache = cache_
if cache is None:
cache = SimpleCache(limit, expire, hashkeys=True)
@wraps(function)
def func(*args):
## Handle cases where caching is down or otherwise not available.
if cache.connection is None:
result = function(*args)
return result
## Key will be either a md5 hash or just pickle object,
## in the form of `function name`:`key`
if cache.hashkeys:
key = hashlib.md5(pickle.dumps(args)).hexdigest()
else:
key = pickle.dumps(args)
cache_key = '%s:%s' % (function.__name__, key)
if cache_key in cache:
try:
return cache.get_pickle(cache_key)
except (ExpiredKeyException, CacheMissException) as e:
pass
except:
logging.exception("Unknown redis-simple-cache error. Please check your Redis free space.")
result = function(*args)
cache.store_pickle(cache_key, result)
return result
return func
return decorator
def cache_it_json(limit=1000, expire=60 * 60 * 24, cache=None):
"""
A decorator similar to cache_it, but it serializes the return value to json, while storing
in the database. Useful for types like list, tuple, dict, etc.
"""
cache_ = cache ## Since python 2.x doesn't have the nonlocal keyword, we need to do this
def decorator(function):
cache = cache_
if cache is None:
cache = SimpleCache(limit, expire, hashkeys=True)
@wraps(function)
def func(*args):
## Handle cases where caching is down or otherwise not available.
if cache.connection is None:
result = function(*args)
return result
## Key will be either a md5 hash or just pickle object,
## in the form of `function name`:`key`
if cache.hashkeys:
key = hashlib.md5(json.dumps(args)).hexdigest()
else:
key = json.dumps(args)
cache_key = '%s:%s' % (function.__name__, key)
if cache_key in cache:
try:
return cache.get_json(cache_key)
except (ExpiredKeyException, CacheMissException) as e:
pass
except:
logging.exception("Unknown redis-simple-cache error. Please check your Redis free space.")
result = function(*args)
cache.store_json(cache_key, result)
return result
return func
return decorator
def to_unicode(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
| 34.282158
| 110
| 0.600944
|
545b3f6bac890fe4888f50c316e75ae22889e257
| 3,354
|
py
|
Python
|
main.py
|
feicccccccc/DQN_Pytorch
|
524b1870698873c4c1e72cfa37ba0d341551ffdb
|
[
"MIT"
] | null | null | null |
main.py
|
feicccccccc/DQN_Pytorch
|
524b1870698873c4c1e72cfa37ba0d341551ffdb
|
[
"MIT"
] | null | null | null |
main.py
|
feicccccccc/DQN_Pytorch
|
524b1870698873c4c1e72cfa37ba0d341551ffdb
|
[
"MIT"
] | null | null | null |
"""
Reference and credit:
https://www.youtube.com/watch?v=UlJzzLYgYoE
Main loop
Slight changes to the network layer structure and parameter naming
Change gym wrapper and observation
Add a lot of comment for reference
"""
import gym
import numpy as np
from agent import DQNAgent
from utils import plot_learning_curve, make_env
NUMBER_OF_FRAME = 4
if __name__ == '__main__':
# env = make_env("CartPole-v0")
env = gym.make("CartPole-v1") # same env with different registry
init_screen = env.reset()
best_score = -np.inf
    load_checkpoint = False  # if the user wants to restart from a checkpoint
greedy_action = False # use behavioural policy / target policy
learn = True
initial_epsilon = 0.5
    n_games = 1000  # number of episodes
    # replace target network with evaluation network every `replace` steps (200 here)
# agent = DQNAgent(gamma=0.99, epsilon=1.0, lr=0.001,
# input_dims=init_screen.shape,
# n_actions=env.action_space.n, mem_size=50000, eps_min=0.2,
# batch_size=64, replace=200, eps_dec=5e-4,
# checkpoint_dir='models/', algo='DQNAgent',
# env_name='CartPole-v0-RGB')
agent = DQNAgent(gamma=0.99, epsilon=initial_epsilon, lr=0.0001,
input_dims=env.observation_space.shape,
n_actions=env.action_space.n, mem_size=50000, eps_min=0.1,
batch_size=128, replace=200, eps_dec=1e-5,
checkpoint_dir='models/', algo='DQNAgent_RewardShaping',
env_name='CartPole-v0-FC', reward_shaping=True)
if load_checkpoint:
agent.load_models()
# For record
n_steps = 0
scores, eps_history, steps_array = [], [], []
for i in range(n_games):
done = False
observation = env.reset()
score = 0
while not done:
env.render()
action = agent.choose_action(observation, greedy_action)
next_observation, reward, done, info = env.step(action)
score += reward
if learn:
# store the transition (s,a,r,s') inside the replay memory
agent.store_transition(observation, action, reward, next_observation, done)
# learn through the experience (if there's enough batches)
agent.learn()
observation = next_observation
n_steps += 1
# After 1 episode finish
# keep record stuff
scores.append(score)
steps_array.append(n_steps)
# Average score from last 100 episode
avg_score = np.mean(scores[-100:])
print('episode: ', i, 'score: ', score,
' average score %.1f' % avg_score, 'best score %.2f' % best_score,
'epsilon %.2f' % agent.epsilon, 'steps', n_steps)
if avg_score > best_score:
if learn:
agent.save_models()
best_score = avg_score
eps_history.append(agent.epsilon)
if load_checkpoint and not learn and n_steps >= 18000:
break
    # for graph plotting
fname = agent.algo + '_' + agent.env_name + '_lr' + str(agent.lr) + '_' + str(n_games) + 'games'
figure_file = 'plots/' + fname + '.png'
plot_learning_curve(steps_array, scores, eps_history, figure_file)
| 34.57732
| 100
| 0.607335
|
d80354a9400ef8db18141157402d858b7336aaa8
| 9,565
|
py
|
Python
|
package_managers/security_check.py
|
mikekamornikov/base-images-docker
|
10df42be69f0c4afc87f0df9a0b35fcfb43b5854
|
[
"Apache-2.0"
] | null | null | null |
package_managers/security_check.py
|
mikekamornikov/base-images-docker
|
10df42be69f0c4afc87f0df9a0b35fcfb43b5854
|
[
"Apache-2.0"
] | null | null | null |
package_managers/security_check.py
|
mikekamornikov/base-images-docker
|
10df42be69f0c4afc87f0df9a0b35fcfb43b5854
|
[
"Apache-2.0"
] | null | null | null |
"""Checks the specified image for security vulnerabilities."""
import argparse
import json
import subprocess
import sys
import logging
import yaml
import distutils.version as ver
# Severities
_LOW = 'LOW'
_MEDIUM = 'MEDIUM'
_HIGH = 'HIGH'
_CRITICAL = 'CRITICAL'
_SEV_MAP = {
_LOW: 0,
_MEDIUM: 1,
_HIGH: 2,
_CRITICAL: 3,
}
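# Illustrative note (not in the original source): with a severity threshold of
# _MEDIUM (1), _get_relevant_severities() keeps MEDIUM, HIGH and CRITICAL
# findings and drops LOW ones.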
# Drydock only scans the main repository, but none of the mirrors.
# Swap out any mirrors for gcr.io when scanning.
_CANONICAL_IMAGE_REPOSITORY = {
'l.gcr.io/google': 'launcher.gcr.io/google',
'eu.gcr.io/google-appengine': 'gcr.io/google-appengine',
'us.gcr.io/google-appengine': 'gcr.io/google-appengine',
'asia.gcr.io/google-appengine': 'gcr.io/google-appengine'
}
REPOSITORIES_TO_IGNORE = {
'us-mirror.gcr.io/library'
}
def gcloud_path():
"""Returns the path to the gcloud command. Requires gcloud to be on system PATH"""
return 'gcloud'
def _sub_image(full_image):
repo, image = full_image.rsplit('/', 1)
if repo in REPOSITORIES_TO_IGNORE:
logging.info('Ignoring repository %s', repo)
return None
repo = _CANONICAL_IMAGE_REPOSITORY.get(repo, repo)
new_image = '/'.join((repo, image))
if new_image != full_image:
logging.info('Checking %s instead of %s', new_image, full_image)
return new_image
def _run_gcloud(cmd):
full_cmd = [gcloud_path(), 'alpha', 'container', 'images',
'--format=json'] + cmd
output = subprocess.check_output(full_cmd)
return json.loads(output)
def _find_base_image(image):
"""Finds the base image of the given image.
Args:
image: The name of the image to find the base of.
Returns:
The name of the base image if it exists, otherwise None.
"""
parsed = _run_gcloud(['describe', '--show-image-basis', image])
img = parsed['image_basis_summary'].get('base_images')
if not img:
return None
base_img_url = img[0]['derivedImage']['baseResourceUrl']
base_img = base_img_url[len('https://'):]
return _sub_image(base_img)
def _check_for_vulnz(image, severity, whitelist):
"""Checks drydock for image vulnerabilities.
Args:
image: full name of the docker image
severity: the severity of vulnerability to trigger failure
whitelist: list of CVEs to ignore for this test
Returns:
Map of vulnerabilities, if present.
"""
logging.info('CHECKING %s', image)
unpatched = _check_image(image, severity, whitelist)
if not unpatched:
return unpatched
base_image = _find_base_image(image)
base_unpatched = {}
if base_image:
base_unpatched = _check_image(base_image, severity, whitelist)
else:
logging.info('Could not find base image for %s', image)
count = 0
for k, vuln in unpatched.items():
if k not in base_unpatched.keys():
count += 1
logging.info(format_vuln(vuln))
else:
logging.info('Vulnerability %s exists in the base '
'image. Skipping.', k)
if count > 0:
logging.info('Found %s unpatched vulnerabilities in %s. Run '
'[gcloud alpha container images describe %s] '
'to see the full list.', count, image, image)
return unpatched
def format_vuln(vuln):
"""Formats a vulnerability dictionary into a human-readable string.
Args:
vuln: vulnerability dict returned from drydock.
Returns:
Human readable string.
"""
packages = ''
fixed_packages = ''
for v in vuln['vulnerabilityDetails']['packageIssue']:
packages = ' '.join([packages, '{0} ({1})'.format(
v['affectedLocation']['package'],
_get_version_number(v['affectedLocation']['version']))])
fixed_packages = ' '.join([fixed_packages, '{0} ({1})'.format(
v['fixedLocation']['package'],
_get_version_number(v['fixedLocation']['version']))])
return """
Vulnerability found.
CVE: {0}
SEVERITY: {1}
PACKAGES: {2}
FIXED PACKAGES: {3}
""".format(
vuln['noteName'],
vuln['vulnerabilityDetails']['severity'],
packages,
fixed_packages)
def _check_image(image, severity, whitelist):
"""Checks drydock for image vulnerabilities.
Args:
image: full name of the docker image
severity: the severity of vulnerability to trigger failure
whitelist: list of CVEs to ignore for this test
Returns:
Map of vulnerabilities, if present.
"""
parsed = _run_gcloud(['describe', image, '--show-all-metadata'])
unpatched = {}
vuln_analysis = parsed.get('package_vulnerability_summary', {})
# If there are no fixed vulnz, we can immediately quit.
total_vulnz = vuln_analysis.get('total_vulnerability_found', 0)
unfixed_vulnz = vuln_analysis.get('not_fixed_vulnerability_count', 0)
if total_vulnz <= unfixed_vulnz:
return unpatched
severities = _get_relevant_severities(severity)
vulnz = vuln_analysis['vulnerabilities']
for s in severities:
for v in vulnz.get(s, []):
vuln = v['vulnerabilityDetails']
if not _check_vuln_is_valid(vuln):
continue
if v['noteName'] in whitelist:
continue
unpatched[v['noteName']] = v
return unpatched
def _get_relevant_severities(severity):
    return [k for k, v in _SEV_MAP.items()
if v >= _SEV_MAP.get(severity, 1)]
def _check_vuln_is_valid(vuln):
"""Checks whether the given vulnerability is valid.
Args:
vuln: The vulnerability json.
Returns:
boolean, whether it is valid.
"""
for pkg in vuln.get('packageIssue', []):
affected_location = pkg.get('affectedLocation')
fixed_location = pkg.get('fixedLocation')
if affected_location and fixed_location:
# First, make sure the vulnerability is patched
if not fixed_location['version'].get('name'):
return False
# Make sure the fixed version is later than the affected version
affected_version = _get_version_number(affected_location['version'])
fixed_version = _get_version_number(fixed_location['version'])
if not fixed_version:
return False
if ver.LooseVersion(fixed_version) > ver.LooseVersion(affected_version):
return True
logging.info('Vulnerability %s is already fixed. '
'The affected package: %s is greater '
'than the fixed package: %s',
vuln.get('vulnerability'),
affected_version,
fixed_version)
return False
def _get_version_number(version_obj):
# Only name is required for a version, epoch and revision are both optional.
epoch = version_obj.get('epoch', '')
name = version_obj.get('name', '')
revision = version_obj.get('revision', '')
delimiter1 = ':' if epoch else ''
delimiter2 = '-' if revision else ''
return ''.join([str(epoch), delimiter1, name, delimiter2, str(revision)])
def _generate_yaml_output(output_yaml, vulnerabilities):
"""Generate a YAML file mapping the key "tags" to the list of types of
vulnerabilities found.
Args:
output_yaml: Path to the output YAML file to generate.
vulnerabilities: A dictionary mapping the name of the CVE entry to details
about the vulnerability.
"""
tags = set()
    for v in vulnerabilities.values():
details = v["vulnerabilityDetails"]
# The service that consumes the metadata expects the tags as follows:
# LOW -> cveLow
        # MEDIUM -> cveMedium
# and so on...
sev = str(details['effectiveSeverity'])
tags.add("cve{}".format(sev.lower().capitalize()))
result = {"tags": list(tags)}
logging.info("Creating YAML output {}".format(output_yaml))
with open(output_yaml, "w") as ofp:
ofp.write(yaml.dump(result))
def security_check(image, severity=_MEDIUM, whitelist_file='whitelist.json',
output_yaml=None):
"""Main security check function.
Args:
image: full name of the docker image
severity: the severity of vulnerability to trigger failure
whitelist_file: file with list of whitelisted CVE
output_yaml: Output file which will be populated with a list of types of
vulnerability that exist for the given image.
Returns:
Map of vulnerabilities, if present.
"""
try:
logging.info("Loading whitelist JSON {}".format(whitelist_file))
whitelist = json.load(open(whitelist_file, 'r'))
except IOError:
whitelist = []
logging.info('whitelist=%s', whitelist)
result = _check_for_vulnz(_sub_image(image), severity, whitelist)
if output_yaml:
logging.info("Creating YAML output {}".format(output_yaml))
_generate_yaml_output(output_yaml, result)
return result
def _main():
"""Main."""
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('image', help='The image to test')
parser.add_argument('--severity',
choices=[_LOW, _MEDIUM, _HIGH, _CRITICAL],
default=_MEDIUM,
help='The minimum severity to filter on.')
parser.add_argument('--whitelist-file', dest='whitelist',
help='The path to the whitelist json file',
default='whitelist.json')
parser.add_argument('--output-yaml', dest='output_yaml',
help='The path to the output YAML file to'+\
' generate with a list of tags indicating the types of'+\
' vulnerability fixes available for the given image.')
args = parser.parse_args()
security_check(args.image, args.severity, args.whitelist,
args.output_yaml)
if __name__ == '__main__':
sys.exit(_main())
| 28.897281
| 84
| 0.670988
|
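For reference, the checker above can be driven from the command line or imported directly; the image name and CVE identifiers below are placeholders, not values taken from the source, and the import path assumes security_check.py is on the Python path.

# Command-line form, matching the arguments defined in _main():
#   python security_check.py gcr.io/my-project/my-image --severity HIGH \
#       --whitelist-file whitelist.json --output-yaml vulnz.yaml
#
# whitelist.json is a JSON list of note names to skip, e.g. ["CVE-2017-0000"].

from security_check import security_check

unpatched = security_check('gcr.io/my-project/my-image',
                           severity='HIGH',
                           whitelist_file='whitelist.json',
                           output_yaml='vulnz.yaml')
for note_name in unpatched:
    print(note_name)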
c930346c1f32441029007a6292a2b92c85ec9bfd
| 2,875
|
py
|
Python
|
mdsimulator/obsolete_code/mcmc.py
|
Marsll/md-simulator
|
8c1853420a8dd94da0c440ea359c251dd10413bb
|
[
"Apache-2.0"
] | 1
|
2021-12-29T22:20:57.000Z
|
2021-12-29T22:20:57.000Z
|
mdsimulator/obsolete_code/mcmc.py
|
Marsll/md-simulator
|
8c1853420a8dd94da0c440ea359c251dd10413bb
|
[
"Apache-2.0"
] | null | null | null |
mdsimulator/obsolete_code/mcmc.py
|
Marsll/md-simulator
|
8c1853420a8dd94da0c440ea359c251dd10413bb
|
[
"Apache-2.0"
] | 4
|
2017-12-11T10:58:11.000Z
|
2021-12-29T22:21:03.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from neighbor_list import NeighborList
import scipy.spatial.distance as dist
from lennard_jones import all_lennard_jones_potential
from cell_order import create_cell_order
def mcmc_step(ppos, dims, r_cut, nbs=None, nl=None, alpha=0.1, beta=1000,
epot=None, **kwargs):
if nl is None:
nl = NeighborList(dims, ppos, r_cut)
if nbs is None:
nbs = create_cell_order(r_cut, dims)
if epot is None:
epot = all_lennard_jones_potential(ppos, nl, nbs, r_cut)
ppos_trial = ppos + alpha * (np.random.rand(*ppos.shape) - 0.5)
e_trial = all_lennard_jones_potential(ppos_trial, nl, nbs, r_cut)
diff = 1000
if e_trial < epot or np.random.rand() < np.exp(beta * (epot - e_trial)):
hard_walls(ppos_trial, dims)
diff = np.absolute(epot - e_trial)
nl.update(ppos_trial)
return ppos_trial, e_trial, diff, nbs, nl
return ppos, epot, diff, nbs, nl
def mcmc(ppos, dims, r_cut, alpha=0.1, beta=1000, tol=1E-8, max_steps=1000, **kwargs):
nl = NeighborList(dims, ppos, r_cut)
nbs = create_cell_order(r_cut, dims)
epot = all_lennard_jones_potential(ppos, nl, nbs, r_cut)
diff = 1000
count = 0
while count < max_steps and diff >= tol:
count += 1
ppos, epot, diff, nbs, nl = mcmc_step(
ppos, dims, r_cut, nbs, nl, alpha, beta, epot)
# print(potential,ppos)
#print(count)
return ppos, epot
def hard_walls(ppos, dims):
ppos[ppos <= 0] = 0.1
for i, x in enumerate(ppos.T):
x[x > dims[i]] = dims[i]
def back_map(ppos, box):
for i, length in enumerate(box):
while any(ppos[:,i] >= length):
ppos.T[i][ppos[:,i] >= length] -= length
while any(ppos[:,i] < 0):
ppos.T[i][ppos[:,i] < 0] += length
return ppos
def plot_positions(ppos):
fig = plt.figure()
dims = ppos.shape[1]
if dims == 3:
ax = fig.gca(projection='3d')
ax.scatter(*ppos.T, marker="o")
elif dims == 2:
plt.scatter(*ppos.T, marker="o")
elif dims == 1:
y = np.zeros(ppos.shape[0])
plt.plot(*ppos.T, np.zeros_like(y), "o")
def plot_forces(ppos, forces):
fig = plt.figure()
dims = ppos.shape[1]
if dims == 3:
ax = fig.gca(projection='3d')
ax.quiver(*ppos.T, *forces.T, length=0.1, normalize=True)
elif dims == 2:
plt.quiver(*ppos.T, *forces.T)
elif dims == 1:
plt.quiver(*ppos.T, *forces.T)
def test_mcmc():
"""Three particles in a hard box."""
ppos = np.random.random([3, 3]) * 5
plot_positions(ppos)
dim_box = (10, 10, 10)
finalppos, potential = mcmc(ppos, dim_box, r_cut=5)
plot_positions(finalppos)
#plt.show()
print(potential, finalppos)
test_mcmc()
| 27.644231
| 86
| 0.609043
|
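The accept/reject test in mcmc_step above is the standard Metropolis criterion; here is the same rule in isolation, decoupled from the Lennard-Jones and neighbor-list machinery (the energies are arbitrary illustration values):

import numpy as np

def metropolis_accept(e_old, e_new, beta):
    # Downhill moves are always accepted; uphill moves are accepted
    # with probability exp(-beta * (e_new - e_old)).
    if e_new < e_old:
        return True
    return np.random.rand() < np.exp(beta * (e_old - e_new))

print(metropolis_accept(-10.0, -12.0, beta=1000))   # downhill: always True
print(metropolis_accept(-10.0, -9.999, beta=1000))  # uphill: True with probability exp(-1) ~ 0.37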
231ed0b23b4e5453bc5767206d0d220c15a92963
| 397
|
py
|
Python
|
test/test_configwidget.py
|
hueyjj/Cherry
|
e81de5e04380a036f15f875101e724b583cc2312
|
[
"MIT"
] | null | null | null |
test/test_configwidget.py
|
hueyjj/Cherry
|
e81de5e04380a036f15f875101e724b583cc2312
|
[
"MIT"
] | null | null | null |
test/test_configwidget.py
|
hueyjj/Cherry
|
e81de5e04380a036f15f875101e724b583cc2312
|
[
"MIT"
] | null | null | null |
from PyQt5 import (
QtWidgets,
)
from PyQt5.QtWidgets import (
QApplication,
)
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cherry_dl.gui.config import Config
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
config = Config()
config.show()
sys.exit(app.exec_())
| 17.26087
| 80
| 0.649874
|
984c6e3f6f89eb4fe200e7474324ea131da1c8f0
| 620
|
py
|
Python
|
users/rules.py
|
gafderks/dbase
|
4089cf220740afd7fcc0ae68fcd6185829a60eae
|
[
"Apache-2.0"
] | null | null | null |
users/rules.py
|
gafderks/dbase
|
4089cf220740afd7fcc0ae68fcd6185829a60eae
|
[
"Apache-2.0"
] | 281
|
2020-04-03T15:22:46.000Z
|
2022-03-31T20:53:28.000Z
|
users/rules.py
|
gafderks/dbase
|
4089cf220740afd7fcc0ae68fcd6185829a60eae
|
[
"Apache-2.0"
] | null | null | null |
import rules
@rules.predicate
def is_own_group(user, group):
"""
Check whether a user belongs to a specified group.
:param User user:
:param Group group: use None for all groups
:return: bool
"""
if group is None:
return False
return user.group == group
@rules.predicate
def view_bookings_from_group(user, group):
"""
Check whether a user may view a specified game.
:param User user:
:param Group group: use None for all groups
:return: bool
"""
return user.has_perm("booking.view_others_groups_bookings") | is_own_group(
user, group
)
| 22.142857
| 79
| 0.659677
|
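These predicates only take effect once they are registered as permissions; a minimal sketch of how that wiring usually looks with django-rules (the permission names here are illustrative, not taken from the project):

import rules
from users.rules import is_own_group, view_bookings_from_group

# Hypothetical permission names; the project's actual registrations are not shown here.
rules.add_perm("booking.view_group_bookings", view_bookings_from_group)
rules.add_perm("booking.edit_group_bookings", rules.is_authenticated & is_own_group)

# Checked later with the object-level form, e.g. in a view:
#   request.user.has_perm("booking.view_group_bookings", group)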
34a7e88b91436ee9c5eb75759c08d9cce8f5c3dd
| 798
|
py
|
Python
|
tests/programs/absolute_import/foobar/__init__.py
|
augustand/Nuitka
|
b7b9dd50b60505a309f430ce17cad36fb7d75048
|
[
"Apache-2.0"
] | null | null | null |
tests/programs/absolute_import/foobar/__init__.py
|
augustand/Nuitka
|
b7b9dd50b60505a309f430ce17cad36fb7d75048
|
[
"Apache-2.0"
] | null | null | null |
tests/programs/absolute_import/foobar/__init__.py
|
augustand/Nuitka
|
b7b9dd50b60505a309f430ce17cad36fb7d75048
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .foobar import Foobar
| 38
| 79
| 0.718045
|
510ad169e2726470de245ea37bc81319fbd265d2
| 3,869
|
py
|
Python
|
TechnicalAnalysis/lstm_technical.py
|
ShobhitLamba/Stocker
|
a0d97e58faa7a04df500cc3aa11498b839991fcf
|
[
"MIT"
] | null | null | null |
TechnicalAnalysis/lstm_technical.py
|
ShobhitLamba/Stocker
|
a0d97e58faa7a04df500cc3aa11498b839991fcf
|
[
"MIT"
] | null | null | null |
TechnicalAnalysis/lstm_technical.py
|
ShobhitLamba/Stocker
|
a0d97e58faa7a04df500cc3aa11498b839991fcf
|
[
"MIT"
] | null | null | null |
#Part 1 - Data Preprocessing
#Importing the libraries
import numpy as np
import pandas as pd
#from keras.utils import to_categorical
#Importing the training set
dataset_train = pd.read_csv('Datasets/BRK_fin_train.csv')
fin_data = dataset_train.iloc[:, 7:13].values
training_set = dataset_train.iloc[:, 4:5].values
#Feature Scaling
from sklearn.preprocessing import minmax_scale
#sc = minmax_scale(feature_range = (0, 1))
fin_data_scaled = minmax_scale(fin_data, feature_range = (0, 1))
#Creating a data structure with 60 timesteps and 1 output
X_train = []
y_train = []
y_temp = []
for i in range(1, 4482):
# X_train.append(macd_scaled[i-100:i, 0])
y_temp.append(training_set[i-1, 0])
#y_temp = np.insert(y_temp, 0, training_set[0], axis = 0)
#y_temp = np.insert(y_temp, 0, training_set[0], axis = 0)
for i in range(60, len(y_temp)):
if y_temp[i-60] > y_temp[i]:
y_train.append(0)
else:
y_train.append(1)
fin_data_scaled = fin_data_scaled[61:]
X_train = np.array(fin_data_scaled)
# Reshaping
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
#Part 2 - Building the model
#Importing the keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv1D, BatchNormalization, Activation
from keras.layers import LSTM, Dropout
# Initializing the RNN
regressor = Sequential()
regressor.add(Conv1D(filters = 128, kernel_size = 1, input_shape = (X_train.shape[1], 1)))
regressor.add(BatchNormalization())
regressor.add(Activation('relu'))
# Adding the LSTM layers and some Dropout regularisation
regressor.add(LSTM(units = 100, return_sequences = True))
regressor.add(Dropout(0.5))
regressor.add(LSTM(units = 100))
regressor.add(Dropout(0.2))
# Adding the output layer
regressor.add(Dense(units = 1, activation = 'sigmoid'))
#Compiling the RNN
regressor.compile(optimizer = 'adam', loss = 'binary_crossentropy')
regressor.summary()
#Fitting the RNN to the training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 32)
# Getting the real stock price of 2018
dataset_test = pd.read_csv('Datasets/BRK_fin_test.csv')
real_stock_price = dataset_test.iloc[:, 4:5].values
# Getting the predictions
dataset_total_y = np.insert(real_stock_price, 0, 169.23, axis = 0)
inputs_x = dataset_test.iloc[:, 7:13].values
inputs_x = minmax_scale(inputs_x, feature_range = (0, 1))
inputs_y = dataset_total_y.reshape(-1,1)
X_test = []
y_test_temp = []
y_test = []
for i in range(1, 198):
y_test_temp.append(inputs_y[i-1, 0])
inputs_x = inputs_x[61:]
X_test = np.array(inputs_x)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted = regressor.predict_classes(X_test)
#y_test_temp = np.insert(y_test_temp, 0, real_stock_price[0], axis = 0)
#y_test_temp = np.insert(y_test_temp, 0, real_stock_price[0], axis = 0)
for i in range(60, len(y_test_temp)):
if y_test_temp[i-60] > y_test_temp[i]:
y_test.append(0)
else:
y_test.append(1)
# Checking performance
from sklearn.metrics import precision_recall_fscore_support as score
precision, recall, fscore, support = score(y_test, predicted)
count = 0
for i in range(len(y_test)):
if(y_test[i] == predicted[i]):
count+=1
print('accuracy: ', count/len(y_test))
print('precision: {}'.format(precision))
print('recall: {}'.format(recall))
print('fscore: {}'.format(fscore))
print('support: {}'.format(support))
import csv
rows = []
for i in range(len(y_test)):
rows.append([str(y_test[i]), str(predicted[i])])
for row in rows:
row[0] = row[0].replace('[', '').replace(']', '')
row[1] = row[1].replace('[', '').replace(']', '')
with open('BRK_60_pred.csv', 'w') as aapl:
wr = csv.writer(aapl, lineterminator='\n')
for row in rows:
# print(row)
wr.writerow(row)
| 28.659259
| 90
| 0.704575
|
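The label construction in the script above (compare the close from 60 steps back with the current close; 1 if the price rose, 0 otherwise) is easier to see on a toy series; a small illustration with made-up prices and a shorter lag:

import numpy as np

prices = np.array([100.0, 101.0, 99.0, 103.0, 102.0, 105.0])
lag = 2  # the script above uses a 60-step lag

labels = []
for i in range(lag, len(prices)):
    # 1 if the price rose relative to `lag` steps earlier, else 0
    labels.append(0 if prices[i - lag] > prices[i] else 1)

print(labels)  # [0, 1, 1, 1]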
06482367abb1ed7c90c9462f63556a19409f0b22
| 3,981
|
py
|
Python
|
sites/root/pysvr/svr.py
|
justletterh/hreqdotxyz
|
6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9
|
[
"CC0-1.0"
] | null | null | null |
sites/root/pysvr/svr.py
|
justletterh/hreqdotxyz
|
6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9
|
[
"CC0-1.0"
] | null | null | null |
sites/root/pysvr/svr.py
|
justletterh/hreqdotxyz
|
6f56bb3c6f9e1a0475b5ac3995ec02c083db17e9
|
[
"CC0-1.0"
] | null | null | null |
from aiohttp import web
import psutil
import platform
from datetime import datetime
import sys
import subprocess
import asyncio
def get_size(bytes, suffix="B"):
"""
    Scale a byte count to a human-readable string,
    e.g.:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
async def nginx():
ng = subprocess.Popen(['nginx -v'],stdout=subprocess.PIPE,universal_newlines=True,shell=True)
out = str(ng.stdout.read())
if out.endswith("\n"):
out = out[0:len(out)-1]
if out == '':
out = 'nginx version: nginx/1.14.2'
return out
async def apt():
ap = subprocess.Popen(['apt -v'],stdout=subprocess.PIPE,universal_newlines=True,shell=True)
out = str(ap.stdout.read())
if out.endswith("\n"):
out = out[0:len(out)-1]
return out
async def nano():
na = subprocess.Popen(['nano --version'],stdout=subprocess.PIPE,universal_newlines=True,shell=True)
out = str(na.stdout.read())
if out.endswith("\n"):
out = out[0:len(out)-1]
return out
app = web.Application()
routes = web.RouteTableDef()
@routes.get('/')
async def get_handler(request):
return web.Response(text="h")
@routes.get('/status')
async def status_handler(request):
auth = "PASSWORD"
if str(request.headers['auth']) == auth:
uname = platform.uname()
os = f"{uname.system}"
node = f"{uname.node}"
release = f"{uname.release}"
ver = f"{uname.version}"
arch = f"{uname.machine}"
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
start = f"{bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}"
system = {"os": os, "node": node, "release": release, "ver": ver, "arch": arch, "start": start}
cpufreq = psutil.cpu_freq()
phys = str(psutil.cpu_count(logical=False))
ctotal = str(psutil.cpu_count(logical=True))
curfreq = f"{round(cpufreq.current,2)}Mhz"
use = f"{psutil.cpu_percent()}%"
cpu = {"curfreq": curfreq, "phys": phys, "total": ctotal, "curfreq": curfreq, "use": use}
svmem = psutil.virtual_memory()
mtotal = f"{get_size(svmem.total)}"
        available = f"{get_size(svmem.available)}"
used = f"{get_size(svmem.used)}"
percnt = f"{svmem.percent}%"
swap = psutil.swap_memory()
swp = {'total':f"{get_size(swap.total)}", 'free':f"{get_size(swap.free)}", 'used':f"{get_size(swap.used)}", 'percnt':f"{swap.percent}%"}
        mem = {'total':mtotal, 'available':available, 'used':used, 'percnt':percnt, 'swap':swp}
if_addrs = psutil.net_if_addrs()
for interface_name, interface_addresses in if_addrs.items():
for address in interface_addresses:
if interface_name == "eth0":
if str(address.family) == 'AddressFamily.AF_INET':
global name, ip, mask, bip
name = f"{interface_name}"
ip = f"{address.address}"
mask = f"{address.netmask}"
bip = f"{address.broadcast}"
net = {'name':name, 'ip':ip, 'mask':mask, 'bip':bip}
net_io = psutil.net_io_counters()
bsent = f"{get_size(net_io.bytes_sent)}"
brcved = f"{get_size(net_io.bytes_recv)}"
io = {'sent':bsent, 'rcved':brcved}
inf = {'sys':system, 'cpu':cpu, 'mem':mem, 'net':net, 'io':io}
py = {'ver':str(sys.version), 'verinf':str(sys.version_info)}
otherver = {'nginx':await nginx(), 'apt':await apt(), 'nano':await nano()}
dat = {'sys':inf, 'py':py, 'other-versions':otherver}
return web.json_response(dat)
if str(request.headers['auth']) != auth:
raise web.HTTPUnauthorized()
app.add_routes(routes)
web.run_app(app)
| 41.041237
| 144
| 0.583522
|
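A small client-side sketch for exercising the /status endpoint above. The host, port and password are placeholders; the server runs on aiohttp's default port 8080 and compares the request's auth header against its hard-coded value.

import asyncio
import aiohttp

async def fetch_status():
    # 'PASSWORD' mirrors the placeholder in the server code; replace with the real value.
    async with aiohttp.ClientSession() as session:
        async with session.get('http://localhost:8080/status',
                               headers={'auth': 'PASSWORD'}) as resp:
            print(resp.status)
            print(await resp.json())

asyncio.run(fetch_status())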
d0899428ddae7ec7cbacbcba6df1643c51b7e918
| 4,270
|
py
|
Python
|
datasets/ms_terms/ms_terms.py
|
zidingz/datasets
|
02edd9ebc79f715adb1c718d1439fda83dc356f1
|
[
"Apache-2.0"
] | 2
|
2021-11-14T09:11:43.000Z
|
2021-11-14T10:07:49.000Z
|
datasets/ms_terms/ms_terms.py
|
zidingz/datasets
|
02edd9ebc79f715adb1c718d1439fda83dc356f1
|
[
"Apache-2.0"
] | 8
|
2021-10-01T12:00:18.000Z
|
2021-10-08T18:50:05.000Z
|
datasets/ms_terms/ms_terms.py
|
zidingz/datasets
|
02edd9ebc79f715adb1c718d1439fda83dc356f1
|
[
"Apache-2.0"
] | 4
|
2021-07-25T17:09:39.000Z
|
2022-02-12T03:30:08.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Microsoft Terminology Collection."""
import os
import xml.etree.ElementTree as ElementTree
import datasets
_DESCRIPTION = """\
The Microsoft Terminology Collection can be used to develop localized versions of applications that integrate with Microsoft products.
It can also be used to integrate Microsoft terminology into other terminology collections or serve as a base IT glossary
for language development in the nearly 100 languages available. Terminology is provided in .tbx format, an industry standard for terminology exchange.
"""
_LICENSE = """\
See the Microsoft Language Portal Materials License and the Microsoft Terms of Use for details.
"""
_ENTRY_ID = "entry_id"
_TERM_SOURCE = "term_source"
_TERM_POS = "pos"
_TERM_DEFINITION = "definition"
_TERM_TARGET = "term_target"
_FILENAME = "MicrosoftTermCollection.tbx"
class MsTerms(datasets.GeneratorBasedBuilder):
"""The Microsoft Terminology Collection."""
VERSION = datasets.Version("1.0.0")
@property
def manual_download_instructions(self):
return """\
You need to go to https://www.microsoft.com/en-us/language/terminology,
and manually download the language of your interest. Once the download is complete,
a file named MicrosoftTermCollection.tbx will appear in your Downloads folder
or whichever folder your browser chooses to save files to.
You can then move MicrosoftTermCollection.tbx under <path/to/folder>.
The <path/to/folder> can e.g. be "~/manual_data".
ms_terms can then be loaded using the following command `datasets.load_dataset("ms_terms", data_dir="<path/to/folder>")`.
"""
def _info(self):
feature_names = [_ENTRY_ID, _TERM_SOURCE, _TERM_POS, _TERM_DEFINITION, _TERM_TARGET]
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({k: datasets.Value("string") for k in feature_names}),
supervised_keys=None,
homepage="https://www.microsoft.com/en-us/language/terminology",
citation="",
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
path_to_manual_file = os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), _FILENAME)
if not os.path.exists(path_to_manual_file):
raise FileNotFoundError(
"{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('ms_terms', data_dir=...)` that includes a file name {}. Manual download instructions: {})".format(
path_to_manual_file, _FILENAME, self.manual_download_instructions
)
)
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"path": path_to_manual_file})]
def _generate_examples(self, path=None, title_set=None):
tree = ElementTree.parse(path)
root = tree.getroot()
for i, entry in enumerate(root.findall(".//termEntry")):
entry_id = entry.attrib.get("id")
langsets = entry.findall("./langSet")
if len(langsets) != 2:
continue
term_source = langsets[0].find(".//term").text
term_definition = langsets[0].find(".//descrip").text
term_pos = langsets[0].find(".//termNote").text
term_target = langsets[1].find(".//term").text
yield i, {
_ENTRY_ID: entry_id,
_TERM_SOURCE: term_source,
_TERM_POS: term_pos,
_TERM_DEFINITION: term_definition,
_TERM_TARGET: term_target,
}
| 41.862745
| 196
| 0.689461
|
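Once the .tbx file has been downloaded manually as described in manual_download_instructions, loading and inspecting the dataset looks roughly like this (the data_dir below is just an example path):

import datasets

# "~/manual_data" must contain MicrosoftTermCollection.tbx (see the instructions above).
ms_terms = datasets.load_dataset("ms_terms", data_dir="~/manual_data")

first = ms_terms["train"][0]
print(first["term_source"], "->", first["term_target"])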
25d8a6f61e742af12eba152f009869f2e1f9fe11
| 1,208
|
py
|
Python
|
job_trigger.py
|
ppiorunski/p4python
|
1047fdb1b45cf1129f8271f75ded3669d6e74607
|
[
"BSD-3-Clause"
] | null | null | null |
job_trigger.py
|
ppiorunski/p4python
|
1047fdb1b45cf1129f8271f75ded3669d6e74607
|
[
"BSD-3-Clause"
] | null | null | null |
job_trigger.py
|
ppiorunski/p4python
|
1047fdb1b45cf1129f8271f75ded3669d6e74607
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function
import glob, sys, os
pathToBuild = glob.glob('../build/lib*')
if len(pathToBuild) > 0:
versionString = "%d.%d" % (sys.version_info[0], sys.version_info[1])
for i in pathToBuild:
if versionString in i:
sys.path.insert(0, os.path.realpath(i))
pathToBuild = glob.glob('/tmp/p4python*')
if len(pathToBuild) > 0:
sys.path.insert(0, pathToBuild[0])
import P4
def run_trigger(specdef, formname, formfile):
p4 = P4.P4()
try:
p4.define_spec('job', specdef)
with open(formfile) as f:
content = f.read()
parsed = p4.parse_job(content)
parsed._status = "suspended"
content = p4.format_job(parsed)
with open(formfile, "w") as f:
f.write(content)
except Exception as e:
print("Received exception : {}".format(e))
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
    if len(sys.argv) < 4:
print("Usage : job_trigger.py specdef formname formfile")
sys.exit(1)
specdef = sys.argv[1]
formname = sys.argv[2]
formfile = sys.argv[3]
run_trigger(specdef, formname, formfile)
| 24.16
| 76
| 0.594371
|
a07199b4cb51ce0cd901d442b6f8c7a7ed3134fd
| 3,704
|
py
|
Python
|
zhihu_img_spaider/zhihu_spider.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
zhihu_img_spaider/zhihu_spider.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
zhihu_img_spaider/zhihu_spider.py
|
saberbin/small_spider
|
d3c14681a5bb2b068fe5afb23d637d21b8fa76aa
|
[
"Apache-2.0"
] | null | null | null |
import random
import time
import requests
from bs4 import BeautifulSoup
import os
# Request headers (pool of User-Agent strings)
HEADERS = (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:33.0) Gecko/20120101 Firefox/33.0',
'Mozilla/5.0 (MSIE 10.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A'
)
def make_cookies(cookies_path):
"""
函数功能:构造特定用户的cookies
参数:保存cookies的txt文件目录
返回:cookies的字典对象
"""
cookies = dict()
with open(cookies_path, 'r')as f:
data = f.read()
for line in data.split(';'):
        key, value = line.split('=', 1)  # split only once; cookie values may contain '='
cookies[key] = value
return cookies
def get_html(url, cookies=None, headers=None):
"""
函数功能:向服务器请求特定url的页面
参数:
url:链接地址
cookies:该网站的用户的cookies
headers:构造的请求头
返回:响应的文本内容,即HTML数据
"""
global HEADERS
if headers is None:
headers = random.choice(HEADERS)
try:
r = requests.get(url, headers={'User-Agent': headers}, cookies=cookies)
r.encoding = r.apparent_encoding
return r.text
except Exception as e:
print(e)
return None
def get_img_content(img_url):
"""
函数功能:向服务器请求图片数据
参数:
img_url:图片的链接地址
返回:图片的内容,即图片的二进制数据
"""
header2 = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
try:
r = requests.get(img_url, headers={'User-Agent': header2})
return r.content
except Exception as e:
print(e)
def create_folder(folder_path):
"""
函数功能:创建目录,如果该目录已存在则直接返回
参数:
folder_path:需要创建的目录
返回:无
"""
if os.path.exist(folder_path):
print("该文件夹已存在")
return
else:
os.mkdir(folder_path)
def download_img(img_url, file_path=None, file_name=None):
"""
函数功能:下载图片
参数:
img_url:图片的url链接
file_path:要保存的图片的目录
file_name:保存的图片的名称
返回:无
"""
img_content = get_img_content(img_url)
if file_path is None:
if file_name is None:
file_name = img_url.split('/')[-1]
file_path = ''
with open(file_path + file_name, 'wb')as f:
f.write(img_content)
def main():
url = 'https://zhuanlan.zhihu.com/p/89702201'
cookies_path = 'zhihu_cookies.txt'
    # Path for saving images; a relative path is used here
img_path = './img/'
    # Read the cookies from the file and build the dict object
cookies = make_cookies(cookies_path)
    # Build the request headers
headers = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
    # Fetch the HTML data of the page
html = get_html(url, cookies=cookies, headers=headers)
if html is None:
return
    # Convert to a BeautifulSoup object
try:
soup = BeautifulSoup(html, 'html.parser')
print(soup.title)
except Exception as e:
print(e)
    # Get the img tags and de-duplicate them
imgs = tuple(soup.find_all('img', class_='origin_image zh-lightbox-thumb lazy'))
    # Parse the image links out of the tags
imgs = [i['data-actualsrc'] for i in imgs]
    # Create the directory for saving images
create_folder(img_path)
    # Iterate over the image url list and download each image
for img in imgs:
        # No name is passed by default; the image is saved under the name from its url
download_img(img, file_path=img_path, file_name=None)
        time.sleep(random.randint(3, 6))  # sleep 3-6 seconds at random to give the server a break
print('download over.')
if __name__ == '__main__':
main()
| 26.457143
| 131
| 0.6277
|
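make_cookies above expects the raw Cookie header string copied from the browser and saved to a text file; a tiny illustration of the same parsing on a fabricated cookie string (real zhihu cookies look different):

# zhihu_cookies.txt would contain a single line such as:
#   _xsrf=abc123; d_c0="XYZ=|1570000000"; z_c0=tokenvalue
cookie_line = '_xsrf=abc123; d_c0="XYZ=|1570000000"; z_c0=tokenvalue'

cookies = {}
for part in cookie_line.split(';'):
    key, value = part.split('=', 1)  # split once, since values may contain '='
    cookies[key.strip()] = value
print(cookies)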
89eafa1f83ca26b0d61763b8ce49a8a9c482e95e
| 3,278
|
py
|
Python
|
pebble_tool/commands/transcription_server.py
|
tulth/pebble-tool
|
8b4771bbd7f8dacc8cbe776aaaf0aa704e152f9f
|
[
"MIT"
] | 24
|
2015-07-25T07:36:23.000Z
|
2020-11-05T13:55:06.000Z
|
pebble_tool/commands/transcription_server.py
|
tulth/pebble-tool
|
8b4771bbd7f8dacc8cbe776aaaf0aa704e152f9f
|
[
"MIT"
] | 21
|
2015-08-11T19:55:57.000Z
|
2020-05-01T14:07:45.000Z
|
pebble_tool/commands/transcription_server.py
|
tulth/pebble-tool
|
8b4771bbd7f8dacc8cbe776aaaf0aa704e152f9f
|
[
"MIT"
] | 39
|
2015-10-04T06:21:52.000Z
|
2021-12-22T00:36:34.000Z
|
__author__ = 'andrews'
from enum import IntEnum
from time import sleep
import threading
import re
import logging
from libpebble2.services.voice import *
from .base import PebbleCommand
logger = logging.getLogger("pebble_tool.commands.transcription_server")
mapping = {
'connectivity': TranscriptionResult.FailNoInternet,
'disabled': SetupResult.FailDisabled,
'no-speech-detected': TranscriptionResult.FailSpeechNotRecognized, # works because there's no mic on qemu.
}
class TranscriptionServer(PebbleCommand):
''' Starts a voice server listening for voice transcription requests from the app '''
command = 'transcribe'
def _send_result(self):
self._voice_service.send_stop_audio()
if isinstance(self._error, TranscriptionResult):
result = self._error
else:
result = TranscriptionResult.Success
self._voice_service.send_dictation_result(result=result, sentences=[self._words], app_uuid=self._app_uuid)
def _handle_session_setup(self, app_uuid, encoder_info):
RESULT_DELAY = 4
if self._timer is not None:
self._timer.cancel()
self._app_uuid = app_uuid
if isinstance(self._error, SetupResult):
result = self._error
else:
result = SetupResult.Success
self._voice_service.send_session_setup_result(result, self._app_uuid)
if result == SetupResult.Success:
self._timer = threading.Timer(RESULT_DELAY, self._send_result)
self._timer.start()
def _handle_audio_stop(self):
if self._timer is not None:
self._timer.cancel()
self._send_result()
def __call__(self, args):
super(TranscriptionServer, self).__call__(args)
if args.error is not None:
self._error = mapping[args.error]
else:
self._error = None
self._voice_service = VoiceService(self.pebble)
self._timer = None
# Separate the sentence into individual words. Punctuation marks are treated as words
if args.transcription:
stripped = [w.strip() for w in re.split(r'(\W)', args.transcription) if w.strip() != '']
# prefix punctuation marks with backspace character
self._words = [(z if re.match(r'\w', z) else '\b' + z) for z in stripped]
else:
self._words = []
self._voice_service.register_handler("session_setup", self._handle_session_setup)
self._voice_service.register_handler("audio_stop", self._handle_audio_stop)
logger.debug("Transcription server listening")
try:
while True:
sleep(1)
except KeyboardInterrupt:
return
@classmethod
def add_parser(cls, parser):
parser = super(TranscriptionServer, cls).add_parser(parser)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('transcription', nargs='?', type=str,
help="Transcribed message to send in the dictation result")
group.add_argument('--error', type=str, nargs='?', choices=mapping.keys(),
help='Error code to respond with, if simulating a failure.')
return parser
| 35.630435
| 114
| 0.655278
|
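The word splitting in __call__ above turns punctuation into standalone "words" prefixed with a backspace character before they are sent to the watch; that piece is easy to check in isolation. (Invoking the command itself would normally go through the pebble CLI entry point, e.g. pebble transcribe "Hello, world", assuming the standard tool installation.)

import re

transcription = "Hello, world"
stripped = [w.strip() for w in re.split(r'(\W)', transcription) if w.strip() != '']
words = [(z if re.match(r'\w', z) else '\b' + z) for z in stripped]
print(words)  # ['Hello', '\x08,', 'world']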
7000222fba9b15c2ea27806d7fca6f997528819e
| 2,049
|
py
|
Python
|
src/main/devrepo/register.py
|
random-python/devrepo
|
4917c2d55e16d4942c6d8f4b2c57138822e3cfcc
|
[
"Apache-2.0"
] | 1
|
2020-06-20T15:18:17.000Z
|
2020-06-20T15:18:17.000Z
|
src/main/devrepo/register.py
|
random-python/devrepo
|
4917c2d55e16d4942c6d8f4b2c57138822e3cfcc
|
[
"Apache-2.0"
] | null | null | null |
src/main/devrepo/register.py
|
random-python/devrepo
|
4917c2d55e16d4942c6d8f4b2c57138822e3cfcc
|
[
"Apache-2.0"
] | null | null | null |
"""
Repository manager
"""
import os
import sys
import inspect
import logging
from devrepo.config import CONFIG
logger = logging.getLogger(__name__)
def env_get(name: str) -> str:
return os.environ.get(name, None)
def env_set(name: str, value: str) -> None:
os.environ[name] = value
class PythonPath():
"""
Inject project source during development
"""
python_key = 'PYTHONPATH'
@staticmethod
def value() -> str:
return env_get(PythonPath.python_key)
@staticmethod
def inject_source(project_source:str) -> None:
logger.info(f"inject_source: {project_source}")
PythonPath.inject_enviro_source(project_source)
PythonPath.inject_syspath_source(project_source)
@staticmethod
def inject_enviro_source(project_source:str) -> None:
python_path = env_get(PythonPath.python_key)
if python_path and not project_source in python_path:
python_path = f"{project_source}:{python_path}"
else:
python_path = f"{project_source}"
env_set(PythonPath.python_key, python_path)
@staticmethod
def inject_syspath_source(project_source:str) -> None:
if not project_source in sys.path:
sys.path.insert(0, project_source)
def register_repository():
"""
Register development repository packages
"""
this_stack = inspect.stack()
caller_info = this_stack[1]
caller_file = caller_info[1]
caller_path = os.path.abspath(caller_file)
logger.info(f"register_repository: caller_path: {caller_path}")
source_list = CONFIG.get_list('layout', 'source_list')
for source in source_list:
if source in caller_path:
project_root = caller_path.split(source)[0]
for source in source_list:
project_source = f"{project_root}{source}"
PythonPath.inject_source(project_source)
logger.info(f"register_repository: env_path: {PythonPath.value()}")
logger.info(f"register_repository: sys_path: {sys.path}")
| 26.269231
| 71
| 0.681308
|
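register_repository is meant to be called from a script that lives under one of the configured source directories; a minimal usage sketch, assuming the config's source_list contains entries like 'src/main/' and 'src/test/' (the actual layout values live in the project's config, which is not shown here):

# e.g. <project>/src/test/run_local.py
import logging
logging.basicConfig(level=logging.INFO)

from devrepo.register import register_repository

# Derives the project root from this file's location and injects the configured
# source roots into both PYTHONPATH and sys.path.
register_repository()

# import my_package  # hypothetical package living under one of the source roots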
b01881e33decb15009d588ee6f02bba8c6149366
| 1,705
|
py
|
Python
|
src/cfnlint/rules/functions/Base64.py
|
joenye/cfn-python-lint
|
837aac504c58fca4a8ca0211bc57ec920c772bb4
|
[
"MIT-0"
] | 1
|
2020-05-08T20:12:31.000Z
|
2020-05-08T20:12:31.000Z
|
src/cfnlint/rules/functions/Base64.py
|
joenye/cfn-python-lint
|
837aac504c58fca4a8ca0211bc57ec920c772bb4
|
[
"MIT-0"
] | null | null | null |
src/cfnlint/rules/functions/Base64.py
|
joenye/cfn-python-lint
|
837aac504c58fca4a8ca0211bc57ec920c772bb4
|
[
"MIT-0"
] | null | null | null |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class Base64(CloudFormationLintRule):
"""Check if Base64 values are correct"""
id = 'E1021'
shortdesc = 'Base64 validation of parameters'
description = 'Making sure the function not is of list'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-base64.html'
tags = ['functions', 'base64']
def match(self, cfn):
"""Check CloudFormation Base64"""
matches = []
base64_objs = cfn.search_deep_keys('Fn::Base64')
for base64_obj in base64_objs:
tree = base64_obj[:-1]
value_obj = base64_obj[-1]
if isinstance(value_obj, dict):
if len(value_obj) == 1:
for key, _ in value_obj.items():
if key == 'Fn::Split':
message = 'Base64 needs a string at {0}'
matches.append(RuleMatch(
tree[:], message.format('/'.join(map(str, tree)))))
else:
message = 'Base64 needs a string not a map or list at {0}'
matches.append(RuleMatch(
tree[:], message.format('/'.join(map(str, tree)))))
elif not isinstance(value_obj, six.string_types):
message = 'Base64 needs a string at {0}'
matches.append(RuleMatch(
tree[:], message.format('/'.join(map(str, tree)))))
return matches
| 38.75
| 122
| 0.565396
|
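To make the rule concrete, here are the kinds of values E1021 flags versus accepts, written as Python structures mirroring the parsed template (the snippets are illustrative, not taken from the rule's test suite):

# Flagged by match() above: Fn::Base64 over Fn::Split, which yields a list, not a string.
bad = {"Fn::Base64": {"Fn::Split": [",", "a,b,c"]}}

# Flagged: Fn::Base64 over something that is neither a string nor a single-key intrinsic.
also_bad = {"Fn::Base64": ["not", "a", "string"]}

# Passes this rule: Fn::Base64 over a plain string (or another single-key intrinsic).
fine = {"Fn::Base64": "#!/bin/bash\necho hello"}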
98023b70c231af05199ed6e8d86a27b1b2f45883
| 1,898
|
py
|
Python
|
test/test_AliasParser.py
|
rg911/catbuffer
|
a5eeb103d55fede452d8f1f4bb65a447a4c373e8
|
[
"MIT"
] | 1
|
2019-08-18T15:12:29.000Z
|
2019-08-18T15:12:29.000Z
|
test/test_AliasParser.py
|
rg911/catbuffer
|
a5eeb103d55fede452d8f1f4bb65a447a4c373e8
|
[
"MIT"
] | 1
|
2019-08-06T23:06:02.000Z
|
2019-08-06T23:06:02.000Z
|
test/test_AliasParser.py
|
rg911/catbuffer
|
a5eeb103d55fede452d8f1f4bb65a447a4c373e8
|
[
"MIT"
] | null | null | null |
# pylint: disable=invalid-name
import unittest
from test.constants import BUILTIN_TYPE_TUPLES, VALID_USER_TYPE_NAMES, INVALID_USER_TYPE_NAMES
from test.ParserTestUtils import SingleLineParserTestUtils, ParserFactoryTestUtils
from catparser.AliasParser import AliasParserFactory
class AliasParserFactoryTest(unittest.TestCase):
def test_is_match_returns_true_for_positives(self):
# Assert:
ParserFactoryTestUtils(AliasParserFactory, self).assert_positives([
'using A = foo', 'using ^ = $$$', 'using A90zaZa = te$t'
])
def test_is_match_returns_false_for_negatives(self):
# Assert:
ParserFactoryTestUtils(AliasParserFactory, self).assert_negatives([
' using A = foo', 'using A = foo ', 'import A = foo', 'using A = foo bar', 'using A B = foo bar'
])
class AliasParserTest(unittest.TestCase):
def test_can_parse_builtin_as_alias(self):
for builtin_tuple in BUILTIN_TYPE_TUPLES:
# Act + Assert:
SingleLineParserTestUtils(AliasParserFactory, self).assert_parse(
'using Age = {0}'.format(builtin_tuple[0]),
('Age', {'type': 'byte', 'signedness': builtin_tuple[2], 'size': builtin_tuple[1]}))
def test_alias_names_must_have_type_name_semantics(self):
# Assert:
SingleLineParserTestUtils(AliasParserFactory, self).assert_naming(
'using {0} = uint32',
VALID_USER_TYPE_NAMES,
INVALID_USER_TYPE_NAMES)
def test_cannot_parse_invalid_alias(self):
# Arrange:
SingleLineParserTestUtils(AliasParserFactory, self).assert_parse_exceptions([
'using Hash256 = binary_fixed(2x22)', # malformed number
'using Hash256 = binary_fixed(x)', # malformed number
'using Age = uint33', # unknown type
'using Age = FooBar' # user type
])
| 42.177778
| 108
| 0.674921
|