Dataset schema (one row per source file; ranges are the minimum and maximum observed values):

| column | dtype | range / values | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 4 to 1.02M | no |
| ext | string | 8 distinct values | no |
| lang | string | 1 distinct value | no |
| max_stars_repo_path | string | length 4 to 209 | no |
| max_stars_repo_name | string | length 5 to 121 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | 1 to 10 items | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4 to 209 | no |
| max_issues_repo_name | string | length 5 to 121 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | 1 to 10 items | no |
| max_issues_count | int64 | 1 to 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4 to 209 | no |
| max_forks_repo_name | string | length 5 to 121 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | 1 to 10 items | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 4 to 1.02M | no |
| avg_line_length | float64 | 1.07 to 66.1k | no |
| max_line_length | int64 | 4 to 266k | no |
| alphanum_fraction | float64 | 0.01 to 1 | no |
Row 1 | hexsha: 9fe2657c1d2c952493c5608c832e57f3ae6a904a | size: 3,031 | ext: py | lang: Python
max_stars: path track/object_position.py | repo seeing-things/track | head a8faea4bad93948100347a4604f1f0b78edff616 | licenses ["MIT"] | stars 4 | 2019-06-11T20:46:03.000Z to 2019-10-28T17:52:45.000Z
max_issues: path track/object_position.py | repo bgottula/track | head a8faea4bad93948100347a4604f1f0b78edff616 | licenses ["MIT"] | issues 114 | 2017-06-05T06:51:22.000Z to 2019-01-20T23:01:29.000Z
max_forks: path track/object_position.py | repo bgottula/track | head a8faea4bad93948100347a4604f1f0b78edff616 | licenses ["MIT"] | forks null
content:
#!/usr/bin/env python3
"""Prints the position of an object from observer's location."""
import sys
import datetime
import math
import ephem
import ephem.stars
import track
from track import gps_client
def main():
"""See module docstring at the top of this file."""
parser = track.ArgParser()
parser.add_argument(
'--timestamp',
required=False,
help='UNIX timestamp',
type=float)
subparsers = parser.add_subparsers(title='modes', dest='mode')
parser_star = subparsers.add_parser('star', help='named star mode')
parser_star.add_argument('name', help='name of star')
parser_star = subparsers.add_parser('solarsystem', help='named solar system body mode')
parser_star.add_argument('name', help='name of planet or moon')
gps_client.add_program_arguments(parser)
args = parser.parse_args()
# Create a PyEphem Observer object
location = gps_client.make_location_from_args(args)
observer = ephem.Observer()
observer.lat = location.lat.deg
observer.lon = location.lon.deg
observer.elevation = location.height.value
    # Get the PyEphem Body object corresponding to the given named star
if args.mode == 'star':
print('In named star mode: looking up \'{}\''.format(args.name))
target = None
for name, _ in ephem.stars.stars.items():
if args.name.lower() == name.lower():
print('Found named star: \'{}\''.format(name))
target = ephem.star(name)
break
if target is None:
            raise Exception('The named star \'{}\' isn\'t present in PyEphem.'.format(args.name))
    # Get the PyEphem Body object corresponding to the given named solar system body
elif args.mode == 'solarsystem':
print('In named solar system body mode: looking up \'{}\''.format(args.name))
# pylint: disable=protected-access
ss_objs = [name.lower() for _, _, name in ephem._libastro.builtin_planets()]
if args.name.lower() in ss_objs:
body_type = None
for attr in dir(ephem):
if args.name.lower() == attr.lower():
body_type = getattr(ephem, attr)
print('Found solar system body: \'{}\''.format(attr))
break
assert body_type is not None
target = body_type()
else:
raise Exception(
'The solar system body \'{}\' isn\'t present in PyEphem.'.format(args.name)
)
else:
print('You must specify a target.')
sys.exit(1)
if args.timestamp is not None:
observer.date = ephem.Date(datetime.datetime.utcfromtimestamp(args.timestamp))
else:
observer.date = ephem.Date(datetime.datetime.utcnow())
target.compute(observer)
position = {
'az': target.az * 180.0 / math.pi,
'alt': target.alt * 180.0 / math.pi,
}
print('Expected position: ' + str(position))
if __name__ == "__main__":
main()
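# Illustrative invocations (a sketch, not part of the original file; the extra
# location/GPS options added by gps_client.add_program_arguments are omitted
# because their names are not shown here, and the timestamp is an arbitrary
# example UNIX time):
#
#   ./object_position.py star Vega
#   ./object_position.py --timestamp 1571248365 solarsystem Mars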
avg_line_length: 32.945652 | max_line_length: 96 | alphanum_fraction: 0.615308

Row 2 | hexsha: 1def122a4bbeb961e12b9da82f780451743bba17 | size: 1,362 | ext: py | lang: Python
max_stars: path examples/paraphrase_mining.py | repo shibing624/sbert | head 2c53125e1c16453c15444a8219cc6bf458327891 | licenses ["Apache-2.0"] | stars 2 | 2021-08-14T08:58:17.000Z to 2021-08-14T10:12:58.000Z
max_issues: path examples/paraphrase_mining.py | repo shibing624/sbert | head 2c53125e1c16453c15444a8219cc6bf458327891 | licenses ["Apache-2.0"] | issues null
max_forks: path examples/paraphrase_mining.py | repo shibing624/sbert | head 2c53125e1c16453c15444a8219cc6bf458327891 | licenses ["Apache-2.0"] | forks null
content:
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
Paraphrase mining is the task of finding paraphrases (texts with identical / similar meaning)
in a large corpus of sentences. In Semantic Textual Similarity we saw a simplified version of
finding paraphrases in a list of sentences. The approach presented there used a brute-force
approach to score and rank all pairs.
However, as this has a quadratic runtime, it fails to scale to large (10,000 and more) collections
of sentences.
For larger collections, util offers the paraphrase_mining function
"""
import sys
sys.path.append('..')
from sbert import SBert, util
model = SBert('paraphrase-MiniLM-L6-v2')
# Single list of sentences - Possible tens of thousands of sentences
sentences = ['The cat sits outside',
'A man is playing guitar',
'I love pasta',
'The new movie is awesome',
'The cat plays in the garden',
'A woman watches TV',
'The new movie is so great',
'Do you like pizza?']
sentence_embeddings = model.encode(sentences)
paraphrases = util.paraphrase_mining(sentence_embeddings, corpus_chunk_size=len(sentences), top_k=3)
for paraphrase in paraphrases[0:10]:
score, i, j = paraphrase
print("{} \t\t {} \t\t Score: {:.4f}".format(sentences[i], sentences[j], score))
avg_line_length: 33.219512 | max_line_length: 100 | alphanum_fraction: 0.696769

Row 3 | hexsha: 106c922d0c0224ac70f011f613426b77fd316cee | size: 17,549 | ext: py | lang: Python
max_stars: path haystack/indexes.py | repo speedplane/django-haystack | head 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | licenses ["BSD-3-Clause"] | stars 1 | 2017-10-12T14:25:06.000Z to 2017-10-12T14:25:06.000Z
max_issues: path haystack/indexes.py | repo speedplane/django-haystack | head 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | licenses ["BSD-3-Clause"] | issues 2 | 2021-02-08T21:04:45.000Z to 2021-03-31T20:11:18.000Z
max_forks: path haystack/indexes.py | repo speedplane/django-haystack | head 4ace30aea6aa1b1708f79a5a9df20a00fa0b4d96 | licenses ["BSD-3-Clause"] | forks 1 | 2020-05-03T20:43:17.000Z to 2020-05-03T20:43:17.000Z
content:
from __future__ import unicode_literals
import copy
import threading
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.six import with_metaclass
from haystack import connections, connection_router
from haystack.constants import ID, DJANGO_CT, DJANGO_ID, Indexable, DEFAULT_ALIAS
from haystack.fields import *
from haystack.manager import SearchIndexManager
from haystack.utils import get_identifier, get_facet_field_name
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['fields'] = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, SearchIndex)]
# Simulate the MRO.
parents.reverse()
for p in parents:
fields = getattr(p, 'fields', None)
if fields:
attrs['fields'].update(fields)
except NameError:
pass
# Build a dictionary of faceted fields for cross-referencing.
facet_fields = {}
for field_name, obj in attrs.items():
# Only need to check the FacetFields.
if hasattr(obj, 'facet_for'):
if not obj.facet_for in facet_fields:
facet_fields[obj.facet_for] = []
facet_fields[obj.facet_for].append(field_name)
built_fields = {}
for field_name, obj in attrs.items():
if isinstance(obj, SearchField):
field = attrs[field_name]
field.set_instance_name(field_name)
built_fields[field_name] = field
# Only check non-faceted fields for the following info.
if not hasattr(field, 'facet_for'):
if field.faceted == True:
# If no other field is claiming this field as
# ``facet_for``, create a shadow ``FacetField``.
if not field_name in facet_fields:
shadow_facet_name = get_facet_field_name(field_name)
shadow_facet_field = field.facet_class(facet_for=field_name)
shadow_facet_field.set_instance_name(shadow_facet_name)
built_fields[shadow_facet_name] = shadow_facet_field
attrs['fields'].update(built_fields)
# Assigning default 'objects' query manager if it does not already exist
if not 'objects' in attrs:
try:
attrs['objects'] = SearchIndexManager(attrs['Meta'].index_label)
except (KeyError, AttributeError):
attrs['objects'] = SearchIndexManager(DEFAULT_ALIAS)
return super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
class SearchIndex(with_metaclass(DeclarativeMetaclass, threading.local)):
"""
Base class for building indexes.
An example might look like this::
import datetime
from haystack import indexes
from myapp.models import Note
class NoteIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='user')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return Note
def index_queryset(self, using=None):
return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())
"""
def __init__(self):
self.prepared_data = None
content_fields = []
for field_name, field in self.fields.items():
if field.document is True:
content_fields.append(field_name)
if not len(content_fields) == 1:
raise SearchFieldError("The index '%s' must have one (and only one) SearchField with document=True." % self.__class__.__name__)
def get_model(self):
"""
Should return the ``Model`` class (not an instance) that the rest of the
``SearchIndex`` should use.
This method is required & you must override it to return the correct class.
"""
raise NotImplementedError("You must provide a 'model' method for the '%r' index." % self)
def index_queryset(self, using=None):
"""
Get the default QuerySet to index when doing a full update.
Subclasses can override this method to avoid indexing certain objects.
"""
return self.get_model()._default_manager.all()
def read_queryset(self, using=None):
"""
Get the default QuerySet for read actions.
Subclasses can override this method to work with other managers.
Useful when working with default managers that filter some objects.
"""
return self.index_queryset(using=using)
def build_queryset(self, using=None, start_date=None, end_date=None):
"""
Get the default QuerySet to index when doing an index update.
Subclasses can override this method to take into account related
model modification times.
The default is to use ``SearchIndex.index_queryset`` and filter
based on ``SearchIndex.get_updated_field``
"""
extra_lookup_kwargs = {}
model = self.get_model()
updated_field = self.get_updated_field()
update_field_msg = ("No updated date field found for '%s' "
"- not restricting by age.") % model.__name__
if start_date:
if updated_field:
extra_lookup_kwargs['%s__gte' % updated_field] = start_date
else:
warnings.warn(update_field_msg)
if end_date:
if updated_field:
extra_lookup_kwargs['%s__lte' % updated_field] = end_date
else:
warnings.warn(update_field_msg)
index_qs = None
if hasattr(self, 'get_queryset'):
warnings.warn("'SearchIndex.get_queryset' was deprecated in Haystack v2. Please rename the method 'index_queryset'.")
index_qs = self.get_queryset()
else:
index_qs = self.index_queryset(using=using)
if not hasattr(index_qs, 'filter'):
raise ImproperlyConfigured("The '%r' class must return a 'QuerySet' in the 'index_queryset' method." % self)
# `.select_related()` seems like a good idea here but can fail on
# nullable `ForeignKey` as well as what seems like other cases.
return index_qs.filter(**extra_lookup_kwargs).order_by(model._meta.pk.name)
def prepare(self, obj):
"""
Fetches and adds/alters data before indexing.
"""
self.prepared_data = {
ID: get_identifier(obj),
DJANGO_CT: "%s.%s" % (obj._meta.app_label, obj._meta.module_name),
DJANGO_ID: force_text(obj.pk),
}
for field_name, field in self.fields.items():
# Use the possibly overridden name, which will default to the
# variable name of the field.
self.prepared_data[field.index_fieldname] = field.prepare(obj)
if hasattr(self, "prepare_%s" % field_name):
value = getattr(self, "prepare_%s" % field_name)(obj)
self.prepared_data[field.index_fieldname] = value
return self.prepared_data
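    # Note (illustrative, not part of the original module): because prepare()
    # looks for a "prepare_<field_name>" attribute, an index can override a
    # single field's value with a hook such as the hypothetical example below,
    # where `author` is a field and `user` a model attribute:
    #
    #     def prepare_author(self, obj):
    #         return obj.user.get_full_name()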
def full_prepare(self, obj):
self.prepared_data = self.prepare(obj)
for field_name, field in self.fields.items():
# Duplicate data for faceted fields.
if getattr(field, 'facet_for', None):
source_field_name = self.fields[field.facet_for].index_fieldname
# If there's data there, leave it alone. Otherwise, populate it
# with whatever the related field has.
if self.prepared_data[field_name] is None and source_field_name in self.prepared_data:
self.prepared_data[field.index_fieldname] = self.prepared_data[source_field_name]
# Remove any fields that lack a value and are ``null=True``.
if field.null is True:
if self.prepared_data[field.index_fieldname] is None:
del(self.prepared_data[field.index_fieldname])
return self.prepared_data
def get_content_field(self):
"""Returns the field that supplies the primary document to be indexed."""
for field_name, field in self.fields.items():
if field.document is True:
return field.index_fieldname
def get_field_weights(self):
"""Returns a dict of fields with weight values"""
weights = {}
for field_name, field in self.fields.items():
if field.boost:
weights[field_name] = field.boost
return weights
def _get_backend(self, using):
if using is None:
try:
using = connection_router.for_write(index=self)[0]
except IndexError:
# There's no backend to handle it. Bomb out.
return None
return connections[using].get_backend()
def update(self, using=None):
"""
Updates the entire index.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self._get_backend(using)
if backend is not None:
backend.update(self, self.index_queryset(using=using))
def update_object(self, instance, using=None, **kwargs):
"""
Update the index for a single object. Attached to the class's
post-save hook.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
# Check to make sure we want to index this first.
if self.should_update(instance, **kwargs):
backend = self._get_backend(using)
if backend is not None:
backend.update(self, [instance])
def remove_object(self, instance, using=None, **kwargs):
"""
Remove an object from the index. Attached to the class's
post-delete hook.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self._get_backend(using)
if backend is not None:
backend.remove(instance, **kwargs)
def clear(self, using=None):
"""
Clears the entire index.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
backend = self._get_backend(using)
if backend is not None:
backend.clear(models=[self.get_model()])
def reindex(self, using=None):
"""
Completely clear the index for this model and rebuild it.
If ``using`` is provided, it specifies which connection should be
used. Default relies on the routers to decide which backend should
be used.
"""
self.clear(using=using)
self.update(using=using)
def get_updated_field(self):
"""
Get the field name that represents the updated date for the model.
If specified, this is used by the reindex command to filter out results
from the QuerySet, enabling you to reindex only recent records. This
method should either return None (reindex everything always) or a
string of the Model's DateField/DateTimeField name.
"""
return None
def should_update(self, instance, **kwargs):
"""
Determine if an object should be updated in the index.
It's useful to override this when an object may save frequently and
cause excessive reindexing. You should check conditions on the instance
and return False if it is not to be indexed.
By default, returns True (always reindex).
"""
return True
def load_all_queryset(self):
"""
Provides the ability to override how objects get loaded in conjunction
with ``SearchQuerySet.load_all``.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
By default, returns ``all()`` on the model's default manager.
"""
return self.get_model()._default_manager.all()
class BasicSearchIndex(SearchIndex):
text = CharField(document=True, use_template=True)
# End SearchIndexes
# Begin ModelSearchIndexes
def index_field_from_django_field(f, default=CharField):
"""
Returns the Haystack field type that would likely be associated with each
Django type.
"""
result = default
if f.get_internal_type() in ('DateField', 'DateTimeField'):
result = DateTimeField
elif f.get_internal_type() in ('BooleanField', 'NullBooleanField'):
result = BooleanField
elif f.get_internal_type() in ('CommaSeparatedIntegerField',):
result = MultiValueField
elif f.get_internal_type() in ('DecimalField', 'FloatField'):
result = FloatField
elif f.get_internal_type() in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'):
result = IntegerField
return result
class ModelSearchIndex(SearchIndex):
"""
Introspects the model assigned to it and generates a `SearchIndex` based on
the fields of that model.
In addition, it adds a `text` field that is the `document=True` field and
has `use_template=True` option set, just like the `BasicSearchIndex`.
Usage of this class might result in inferior `SearchIndex` objects, which
can directly affect your search results. Use this to establish basic
functionality and move to custom `SearchIndex` objects for better control.
At this time, it does not handle related fields.
"""
text = CharField(document=True, use_template=True)
# list of reserved field names
fields_to_skip = (ID, DJANGO_CT, DJANGO_ID, 'content', 'text')
def __init__(self, extra_field_kwargs=None):
self.model = None
self.prepared_data = None
content_fields = []
self.extra_field_kwargs = extra_field_kwargs or {}
# Introspect the model, adding/removing fields as needed.
# Adds/Excludes should happen only if the fields are not already
# defined in `self.fields`.
self._meta = getattr(self, 'Meta', None)
if self._meta:
self.model = getattr(self._meta, 'model', None)
fields = getattr(self._meta, 'fields', [])
excludes = getattr(self._meta, 'excludes', [])
# Add in the new fields.
self.fields.update(self.get_fields(fields, excludes))
for field_name, field in self.fields.items():
if field.document is True:
content_fields.append(field_name)
if not len(content_fields) == 1:
raise SearchFieldError("The index '%s' must have one (and only one) SearchField with document=True." % self.__class__.__name__)
def should_skip_field(self, field):
"""
Given a Django model field, return if it should be included in the
contributed SearchFields.
"""
# Skip fields in skip list
if field.name in self.fields_to_skip:
return True
# Ignore certain fields (AutoField, related fields).
if field.primary_key or getattr(field, 'rel'):
return True
return False
def get_model(self):
return self.model
def get_index_fieldname(self, f):
"""
Given a Django field, return the appropriate index fieldname.
"""
return f.name
def get_fields(self, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
for f in self.model._meta.fields:
# If the field name is already present, skip
if f.name in self.fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if self.should_skip_field(f):
continue
index_field_class = index_field_from_django_field(f)
kwargs = copy.copy(self.extra_field_kwargs)
kwargs.update({
'model_attr': f.name,
})
if f.null is True:
kwargs['null'] = True
if f.has_default():
kwargs['default'] = f.default
final_fields[f.name] = index_field_class(**kwargs)
final_fields[f.name].set_instance_name(self.get_index_fieldname(f))
return final_fields
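# Hypothetical usage sketch (Note and myapp are illustrative names, not part of
# this module): ModelSearchIndex reads Meta.model plus optional Meta.fields /
# Meta.excludes and builds SearchFields for the remaining model fields, in
# addition to the document=True `text` field it declares itself.
#
#   from haystack import indexes
#   from myapp.models import Note
#
#   class NoteModelIndex(indexes.ModelSearchIndex, indexes.Indexable):
#       class Meta:
#           model = Note
#           excludes = ['internal_notes']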
avg_line_length: 35.814286 | max_line_length: 139 | alphanum_fraction: 0.623454

Row 4 | hexsha: 751faedaae21bf7e5100079d0678b241912f476e | size: 3,078 | ext: py | lang: Python
max_stars: path calibration/TempReg.py | repo shuoli90/PAC-confidence-set | head ab8dcd5205f9aba6b490aabe7bfc74e1410d0f26 | licenses ["Apache-2.0"] | stars 6 | 2020-04-05T18:55:15.000Z to 2021-08-23T02:22:48.000Z
max_issues: path calibration/TempReg.py | repo shuoli90/PAC-confidence-set | head ab8dcd5205f9aba6b490aabe7bfc74e1410d0f26 | licenses ["Apache-2.0"] | issues null
max_forks: path calibration/TempReg.py | repo shuoli90/PAC-confidence-set | head ab8dcd5205f9aba6b490aabe7bfc74e1410d0f26 | licenses ["Apache-2.0"] | forks 1 | 2021-03-29T15:06:43.000Z to 2021-03-29T15:06:43.000Z
content:
import os, sys
import types
import time
import numpy as np
import math
import torch as tc
import torch.tensor as T
sys.path.append("../../")
#from conf_set.utils import *
from classification.utils import *
from calibration.calibrator import BaseCalibrator
##
## calibration for regression
##
class TempScalingReg(BaseCalibrator):
def __init__(self, params, model):
super().__init__(params, model)
self.set_opt_params(self.model.cal_parameters())
def loss(self, fhs, ys, reduction='mean'):
yhs, yhs_var = fhs
loss = self.model.neg_log_prob(yhs, yhs_var, ys)
if reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
elif reduction == 'mean':
return loss.mean()
else:
raise NotImplementedError
def train_epoch(self, ld_tr, opt):
loss_fn = self.loss
for xs_src, ys_src in ld_tr:
if hasattr(xs_src, "to"):
xs_src = xs_src.to(self.device)
else:
assert(hasattr(xs_src[0], "to"))
xs_src = [x.to(self.device) for x in xs_src]
ys_src = ys_src.to(self.device)
# init for backprop
opt.zero_grad()
# compute loss
fhs = self.model(xs_src)
loss = loss_fn(fhs, ys_src)
# backprop
loss.backward()
# update parameters
opt.step()
# clip the value
[T.data.clamp_(1e-9) for T in self.model.cal_parameters()]
print("T:", self.model.cal_parameters())
return loss
def test(self, lds, ld_names, model=None):
if model is None:
model = self.model
model.eval()
loss_fn = lambda fhs, ys: self.loss(fhs, ys, reduction='none')
## regression loss
if ld_names is not None:
assert(len(lds) == len(ld_names))
errors = []
for i, ld in enumerate(lds):
error, _, _ = compute_cls_error([ld], model, self.device, loss_fn=loss_fn)
if ld_names is not None:
print("# %s regression loss: %f"%(ld_names[i], error))
else:
print("# regression loss: %f"%(error))
errors.append(error.unsqueeze(0))
return errors
def validate(self, ld, i):
self.model.eval()
loss_fn = lambda fhs, ys: self.loss(fhs, ys, reduction='none')
loss_mean, _, _ = compute_cls_error([ld], self.model, self.device, loss_fn=loss_fn)
if self.loss_best >= loss_mean:
self.loss_best = loss_mean
self.epoch_best = i
return loss_mean
def set_stop_criterion(self):
self.epoch_best = -np.inf
self.loss_best = np.inf
def stop_criterion(self, epoch):
if epoch - self.epoch_best >= self.params.n_epochs*self.params.early_term_cri:
return True
else:
return False
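# Illustrative reading of stop_criterion (the numbers are made up): with
# params.n_epochs = 500 and params.early_term_cri = 0.1, training stops once
# 500 * 0.1 = 50 epochs have passed without validate() improving loss_best.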
avg_line_length: 29.883495 | max_line_length: 91 | alphanum_fraction: 0.552307

Row 5 | hexsha: 3b1662d8f3433ed2e651aae9d3d0b91155c986dd | size: 579 | ext: py | lang: Python
max_stars: path ros2_facelook_node/launch/ros2_facelook_node.launch.py | repo Misterblue/ros2-looker | head 1b42dede9276708364f876e1ecd6e67118e9c09c | licenses ["Apache-2.0"] | stars 1 | 2021-04-07T09:50:20.000Z to 2021-04-07T09:50:20.000Z
max_issues: path ros2_facelook_node/launch/ros2_facelook_node.launch.py | repo Misterblue/ros2-looker | head 1b42dede9276708364f876e1ecd6e67118e9c09c | licenses ["Apache-2.0"] | issues null
max_forks: path ros2_facelook_node/launch/ros2_facelook_node.launch.py | repo Misterblue/ros2-looker | head 1b42dede9276708364f876e1ecd6e67118e9c09c | licenses ["Apache-2.0"] | forks null
content:
import launch
import launch.actions
import launch.substitutions
import launch_ros.actions
def generate_launch_description():
return launch.LaunchDescription([
launch.actions.DeclareLaunchArgument(
'node_prefix',
default_value=[launch.substitutions.EnvironmentVariable('USER'), '_'],
description='Prefix for node names'),
launch_ros.actions.Node(
package='ros2_facelook_node', executable='service', output='screen',
name=[launch.substitutions.LaunchConfiguration('node_prefix'), 'talker']),
])
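# Illustrative launch command (a sketch; the package and launch-file names are
# taken from the repo path above, and the node_prefix value is arbitrary):
#
#   ros2 launch ros2_facelook_node ros2_facelook_node.launch.py node_prefix:=robot1_
#
# With the default, the node is named "<USER>_talker", since node_prefix
# defaults to the USER environment variable followed by an underscore.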
avg_line_length: 34.058824 | max_line_length: 86 | alphanum_fraction: 0.694301

Row 6 | hexsha: ce9e906f44ab641dbb560ec25c828d9073931af1 | size: 9,060 | ext: py | lang: Python
max_stars: path sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_maintenance_windows_operations.py | repo RAY-316/azure-sdk-for-python | head 4f7790deaf46c6f4e965f099f36eb73a7954ad5b | licenses ["MIT"] | stars 2 | 2021-03-24T06:26:11.000Z to 2021-04-18T15:55:59.000Z
max_issues: path sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_maintenance_windows_operations.py | repo RSidea/azure-sdk-for-python | head 8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef | licenses ["MIT"] | issues null
max_forks: path sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_maintenance_windows_operations.py | repo RSidea/azure-sdk-for-python | head 8f691b2c95ee0fc53b12d08bd83e3f134d9cf0ef | licenses ["MIT"] | forks 1 | 2021-12-18T20:01:22.000Z to 2021-12-18T20:01:22.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlPoolMaintenanceWindowsOperations:
"""SqlPoolMaintenanceWindowsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
maintenance_window_name: str,
**kwargs
) -> "_models.MaintenanceWindows":
"""Get a SQL pool's Maintenance Windows.
Get a SQL pool's Maintenance Windows.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param maintenance_window_name: Maintenance window name.
:type maintenance_window_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceWindows, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.MaintenanceWindows
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MaintenanceWindows"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['maintenanceWindowName'] = self._serialize.query("maintenance_window_name", maintenance_window_name, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MaintenanceWindows', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/maintenancewindows/current'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
maintenance_window_name: str,
parameters: "_models.MaintenanceWindows",
**kwargs
) -> None:
"""Creates or updates a Sql pool's maintenance windows settings.
Creates or updates a Sql pool's maintenance windows settings.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param maintenance_window_name: Maintenance window name.
:type maintenance_window_name: str
:param parameters: The required parameters for creating or updating Maintenance Windows
settings.
:type parameters: ~azure.mgmt.synapse.models.MaintenanceWindows
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'sqlPoolName': self._serialize.url("sql_pool_name", sql_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['maintenanceWindowName'] = self._serialize.query("maintenance_window_name", maintenance_window_name, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'MaintenanceWindows')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/maintenancewindows/current'} # type: ignore
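# Hypothetical calling sketch (hedged: the client class, attribute name, and
# maintenance window name below follow the usual azure-mgmt naming conventions
# but are not shown in this file, so treat them as assumptions):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.synapse.aio import SynapseManagementClient
#
#   client = SynapseManagementClient(DefaultAzureCredential(), subscription_id)
#   mw = await client.sql_pool_maintenance_windows.get(
#       resource_group_name, workspace_name, sql_pool_name,
#       maintenance_window_name="current")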
avg_line_length: 49.508197 | max_line_length: 230 | alphanum_fraction: 0.684658

Row 7 | hexsha: 0cd9263cb56184d9d180dfa439e0dc777c917f4a | size: 22,883 | ext: py | lang: Python
max_stars: path frille-lang/lib/python3.6/site-packages/sklearn/linear_model/_base.py | repo frillecode/CDS-spring-2021-language | head a0b2116044cd20d4a34b98f23bd2663256c90c5d | licenses ["MIT"] | stars 13 | 2020-05-03T18:42:05.000Z to 2022-03-23T07:44:19.000Z
max_issues: path frille-lang/lib/python3.6/site-packages/sklearn/linear_model/_base.py | repo frillecode/CDS-spring-2021-language | head a0b2116044cd20d4a34b98f23bd2663256c90c5d | licenses ["MIT"] | issues 1 | 2021-06-08T06:03:51.000Z to 2021-06-08T06:03:51.000Z
max_forks: path frille-lang/lib/python3.6/site-packages/sklearn/linear_model/_base.py | repo frillecode/CDS-spring-2021-language | head a0b2116044cd20d4a34b98f23bd2663256c90c5d | licenses ["MIT"] | forks 7 | 2020-07-09T15:03:58.000Z to 2021-12-22T04:19:27.000Z
content:
"""
Generalized Linear Models.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Vincent Michel <vincent.michel@inria.fr>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Maryan Morel <maryan.morel@polytechnique.edu>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numbers
import warnings
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from scipy import optimize
from scipy import sparse
from scipy.special import expit
from joblib import Parallel
from ..base import (BaseEstimator, ClassifierMixin, RegressorMixin,
MultiOutputMixin)
from ..utils import check_array
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import _deprecate_positional_args
from ..utils import check_random_state
from ..utils.extmath import safe_sparse_dot
from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale
from ..utils.fixes import sparse_lsqr
from ..utils._seq_dataset import ArrayDataset32, CSRDataset32
from ..utils._seq_dataset import ArrayDataset64, CSRDataset64
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
from ..preprocessing import normalize as f_normalize
# TODO: bayesian_ridge_regression and bayesian_regression_ard
# should be squashed into its respective objects.
SPARSE_INTERCEPT_DECAY = 0.01
# For sparse data intercept updates are scaled by this decay factor to avoid
# intercept oscillation.
def make_dataset(X, y, sample_weight, random_state=None):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data
y : array-like, shape (n_samples, )
Target values.
sample_weight : numpy array of shape (n_samples,)
The weight of each sample
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
dataset
The ``Dataset`` abstraction
intercept_decay
The intercept decay
"""
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset64
seed = rng.randint(1, np.iinfo(np.int32).max)
if X.dtype == np.float32:
CSRData = CSRDataset32
ArrayData = ArrayDataset32
else:
CSRData = CSRDataset64
ArrayData = ArrayDataset64
if sp.issparse(X):
dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight,
seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
X = np.ascontiguousarray(X)
dataset = ArrayData(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
sample_weight=None, return_mean=False, check_input=True):
"""Center and scale data.
Centers data to have mean zero along axis 0. If fit_intercept=False or if
the X is a sparse matrix, no centering is done, but normalization can still
be applied. The function returns the statistics necessary to reconstruct
the input data, which are X_offset, y_offset, X_scale, such that the output
X = (X - X_offset) / X_scale
X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
then the weighted mean of X and y is zero, and not the mean itself. If
return_mean=True, the mean, eventually weighted, is returned, independently
of whether X was centered (option used for optimization with sparse data in
coordinate_descend).
This is here because nearly all linear models will want their data to be
centered. This function also systematically makes y consistent with X.dtype
"""
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if check_input:
X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'],
dtype=FLOAT_DTYPES)
elif copy:
if sp.issparse(X):
X = X.copy()
else:
X = X.copy(order='K')
y = np.asarray(y, dtype=X.dtype)
if fit_intercept:
if sp.issparse(X):
X_offset, X_var = mean_variance_axis(X, axis=0)
if not return_mean:
X_offset[:] = X.dtype.type(0)
if normalize:
# TODO: f_normalize could be used here as well but the function
# inplace_csr_row_normalize_l2 must be changed such that it
# can return also the norms computed internally
# transform variance to norm in-place
X_var *= X.shape[0]
X_scale = np.sqrt(X_var, X_var)
del X_var
X_scale[X_scale == 0] = 1
inplace_column_scale(X, 1. / X_scale)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
else:
X_offset = np.average(X, axis=0, weights=sample_weight)
X -= X_offset
if normalize:
X, X_scale = f_normalize(X, axis=0, copy=False,
return_norm=True)
else:
X_scale = np.ones(X.shape[1], dtype=X.dtype)
y_offset = np.average(y, axis=0, weights=sample_weight)
y = y - y_offset
else:
X_offset = np.zeros(X.shape[1], dtype=X.dtype)
X_scale = np.ones(X.shape[1], dtype=X.dtype)
if y.ndim == 1:
y_offset = X.dtype.type(0)
else:
y_offset = np.zeros(y.shape[1], dtype=X.dtype)
return X, y, X_offset, y_offset, X_scale
# TODO: _rescale_data should be factored into _preprocess_data.
# Currently, the fact that sag implements its own way to deal with
# sample_weight makes the refactoring tricky.
def _rescale_data(X, y, sample_weight):
"""Rescale data sample-wise by square root of sample_weight.
For many linear models, this enables easy support for sample_weight.
Returns
-------
X_rescaled : {array-like, sparse matrix}
y_rescaled : {array-like, sparse matrix}
"""
n_samples = X.shape[0]
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 0:
sample_weight = np.full(n_samples, sample_weight,
dtype=sample_weight.dtype)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
class LinearModel(BaseEstimator, metaclass=ABCMeta):
"""Base class for Linear Models"""
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _decision_function(self, X):
check_is_fitted(self)
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
def predict(self, X):
"""
Predict using the linear model.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Samples.
Returns
-------
C : array, shape (n_samples,)
Returns predicted values.
"""
return self._decision_function(X)
_preprocess_data = staticmethod(_preprocess_data)
def _set_intercept(self, X_offset, y_offset, X_scale):
"""Set the intercept_
"""
if self.fit_intercept:
self.coef_ = self.coef_ / X_scale
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
else:
self.intercept_ = 0.
def _more_tags(self):
return {'requires_y': True}
# XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
# Maybe the n_features checking can be moved to LinearModel.
class LinearClassifierMixin(ClassifierMixin):
"""Mixin for linear classifiers.
Handles prediction for sparse and dense X.
"""
def decision_function(self, X):
"""
Predict confidence scores for samples.
The confidence score for a sample is proportional to the signed
distance of that sample to the hyperplane.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Samples.
Returns
-------
array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes)
Confidence scores per (sample, class) combination. In the binary
case, confidence score for self.classes_[1] where >0 means this
class would be predicted.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
n_features = self.coef_.shape[1]
if X.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (X.shape[1], n_features))
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel() if scores.shape[1] == 1 else scores
def predict(self, X):
"""
Predict class labels for samples in X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Samples.
Returns
-------
C : array, shape [n_samples]
Predicted class label per sample.
"""
scores = self.decision_function(X)
if len(scores.shape) == 1:
indices = (scores > 0).astype(int)
else:
indices = scores.argmax(axis=1)
return self.classes_[indices]
def _predict_proba_lr(self, X):
"""Probability estimation for OvR logistic regression.
Positive class probabilities are computed as
1. / (1. + np.exp(-self.decision_function(X)));
multiclass is handled by normalizing that over all classes.
"""
prob = self.decision_function(X)
expit(prob, out=prob)
if prob.ndim == 1:
return np.vstack([1 - prob, prob]).T
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
class SparseCoefMixin:
"""Mixin for converting coef_ to and from CSR format.
L1-regularizing estimators should inherit this.
"""
def densify(self):
"""
Convert coefficient matrix to dense array format.
Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
default format of ``coef_`` and is required for fitting, so calling
this method is only required on models that have previously been
sparsified; otherwise, it is a no-op.
Returns
-------
self
Fitted estimator.
"""
msg = "Estimator, %(name)s, must be fitted before densifying."
check_is_fitted(self, msg=msg)
if sp.issparse(self.coef_):
self.coef_ = self.coef_.toarray()
return self
def sparsify(self):
"""
Convert coefficient matrix to sparse format.
Converts the ``coef_`` member to a scipy.sparse matrix, which for
L1-regularized models can be much more memory- and storage-efficient
than the usual numpy.ndarray representation.
The ``intercept_`` member is not converted.
Returns
-------
self
Fitted estimator.
Notes
-----
For non-sparse models, i.e. when there are not many zeros in ``coef_``,
this may actually *increase* memory usage, so use this method with
care. A rule of thumb is that the number of zero elements, which can
be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
to provide significant benefits.
After calling this method, further fitting with the partial_fit
method (if any) will not work until you call densify.
"""
msg = "Estimator, %(name)s, must be fitted before sparsifying."
check_is_fitted(self, msg=msg)
self.coef_ = sp.csr_matrix(self.coef_)
return self
class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
"""
Ordinary least squares Linear Regression.
LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
to minimize the residual sum of squares between the observed targets in
the dataset, and the targets predicted by the linear approximation.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
n_jobs : int, default=None
The number of jobs to use for the computation. This will only provide
speedup for n_targets > 1 and sufficient large problems.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive. This
option is only supported for dense arrays.
.. versionadded:: 0.24
Attributes
----------
coef_ : array of shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
one target is passed, this is a 1D array of length n_features.
rank_ : int
Rank of matrix `X`. Only available when `X` is dense.
singular_ : array of shape (min(X, y),)
Singular values of `X`. Only available when `X` is dense.
intercept_ : float or array of shape (n_targets,)
Independent term in the linear model. Set to 0.0 if
`fit_intercept = False`.
See Also
--------
Ridge : Ridge regression addresses some of the
problems of Ordinary Least Squares by imposing a penalty on the
size of the coefficients with l2 regularization.
Lasso : The Lasso is a linear model that estimates
sparse coefficients with l1 regularization.
ElasticNet : Elastic-Net is a linear regression
model trained with both l1 and l2 -norm regularization of the
coefficients.
Notes
-----
From the implementation point of view, this is just plain Ordinary
Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
(scipy.optimize.nnls) wrapped as a predictor object.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LinearRegression
>>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
>>> # y = 1 * x_0 + 2 * x_1 + 3
>>> y = np.dot(X, np.array([1, 2])) + 3
>>> reg = LinearRegression().fit(X, y)
>>> reg.score(X, y)
1.0
>>> reg.coef_
array([1., 2.])
>>> reg.intercept_
3.0000...
>>> reg.predict(np.array([[3, 5]]))
array([16.])
"""
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, normalize=False, copy_X=True,
n_jobs=None, positive=False):
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.n_jobs = n_jobs
self.positive = positive
def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : returns an instance of self.
"""
n_jobs_ = self.n_jobs
accept_sparse = False if self.positive else ['csr', 'csc', 'coo']
X, y = self._validate_data(X, y, accept_sparse=accept_sparse,
y_numeric=True, multi_output=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
copy=self.copy_X, sample_weight=sample_weight,
return_mean=True)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
if self.positive:
if y.ndim < 2:
self.coef_, self._residues = optimize.nnls(X, y)
else:
# scipy.optimize.nnls cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(optimize.nnls)(X, y[:, j])
for j in range(y.shape[1]))
self.coef_, self._residues = map(np.vstack, zip(*outs))
elif sp.issparse(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X_centered = sparse.linalg.LinearOperator(shape=X.shape,
matvec=matvec,
rmatvec=rmatvec)
if y.ndim < 2:
out = sparse_lsqr(X_centered, y)
self.coef_ = out[0]
self._residues = out[3]
else:
# sparse_lstsq cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(sparse_lsqr)(X_centered, y[:, j].ravel())
for j in range(y.shape[1]))
self.coef_ = np.vstack([out[0] for out in outs])
self._residues = np.vstack([out[3] for out in outs])
else:
self.coef_, self._residues, self.rank_, self.singular_ = \
linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy,
check_input=True, sample_weight=None):
"""Aux function used at beginning of fit in linear models
Parameters
----------
order : 'F', 'C' or None, default=None
Whether X and y will be forced to be fortran or c-style. Only relevant
if sample_weight is not None.
"""
n_samples, n_features = X.shape
if sparse.isspmatrix(X):
# copy is not needed here as X is not modified inplace when X is sparse
precompute = False
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=False, return_mean=True, check_input=check_input)
else:
# copy was done in fit if necessary
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy,
check_input=check_input, sample_weight=sample_weight)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight=sample_weight)
if hasattr(precompute, '__array__') and (
fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or
normalize and not np.allclose(X_scale, np.ones(n_features))):
warnings.warn("Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
UserWarning)
# recompute Gram
precompute = 'auto'
Xy = None
# precompute if n_samples > n_features
if isinstance(precompute, str) and precompute == 'auto':
precompute = (n_samples > n_features)
if precompute is True:
# make sure that the 'precompute' array is contiguous.
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype,
order='C')
np.dot(X.T, X, out=precompute)
if not hasattr(precompute, '__array__'):
Xy = None # cannot use Xy if precompute is not Gram
if hasattr(precompute, '__array__') and Xy is None:
common_dtype = np.find_common_type([X.dtype, y.dtype], [])
if y.ndim == 1:
# Xy is 1d, make sure it is contiguous.
Xy = np.empty(shape=n_features, dtype=common_dtype, order='C')
np.dot(X.T, y, out=Xy)
else:
# Make sure that Xy is always F contiguous even if X or y are not
# contiguous: the goal is to make it fast to extract the data for a
# specific target.
n_targets = y.shape[1]
Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype,
order='F')
np.dot(y.T, X, out=Xy.T)
return X, y, X_offset, y_offset, X_scale, precompute, Xy
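# Illustrative check (not part of scikit-learn): the sqrt(sample_weight)
# rescaling done by _rescale_data turns ordinary least squares into weighted
# least squares, since ||sqrt(W)(y - Xb)||^2 = sum_i w_i (y_i - x_i b)^2.
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X, y, w = rng.randn(20, 3), rng.randn(20), rng.rand(20)
#   sw = np.sqrt(w)
#   b_rescaled, *_ = np.linalg.lstsq(X * sw[:, None], y * sw, rcond=None)
#   b_weighted = np.linalg.solve(X.T @ (w[:, None] * X), X.T @ (w * y))
#   assert np.allclose(b_rescaled, b_weighted)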
avg_line_length: 35.587869 | max_line_length: 79 | alphanum_fraction: 0.613032

Row 8 | hexsha: de32673f71e13901d220def4e92199f64d3258f0 | size: 37,615 | ext: py | lang: Python
max_stars: path nmmn/plots.py | repo rsnemmen/nemmen | head 259887ef4fa0cd1ccc50e404998d3c9e27b2fd79 | licenses ["MIT"] | stars 16 | 2016-09-14T00:44:51.000Z to 2022-03-11T22:19:10.000Z
max_issues: path nmmn/plots.py | repo rsnemmen/nemmen | head 259887ef4fa0cd1ccc50e404998d3c9e27b2fd79 | licenses ["MIT"] | issues 2 | 2018-03-04T02:26:20.000Z to 2018-07-06T20:32:19.000Z
max_forks: path nmmn/plots.py | repo rsnemmen/nemmen | head 259887ef4fa0cd1ccc50e404998d3c9e27b2fd79 | licenses ["MIT"] | forks 3 | 2017-11-13T10:46:21.000Z to 2019-06-26T07:22:58.000Z
content:
"""
Fancy plots
==============
"""
import numpy
from matplotlib import pylab
from nmmn import sed
def plot(spec):
"""
Returns the plot of a grmonty spectrum as a pyplot object or plot it on
the screen
:param spec: grmonty spectrum file
"""
s = sed.SED()
s.grmonty(spec)
pylab.plot(s.lognu, s.ll)
pylab.show()
def onehist(x,xlabel='',fontsize=12):
"""
Script that plots the histogram of x with the corresponding xlabel.
"""
pylab.clf()
pylab.rcParams.update({'font.size': fontsize})
pylab.hist(x,histtype='stepfilled')
pylab.legend()
#### Change the X-axis appropriately ####
pylab.xlabel(xlabel)
pylab.ylabel('Number')
pylab.draw()
pylab.show()
def twohists(x1,x2,xmin,xmax,range=None,x1leg='$x_1$',x2leg='$x_2$',xlabel='',fig=1,sharey=False,fontsize=12,bins1=10,bins2=10):
"""
Script that plots two histograms of quantities x1 and x2
sharing the same X-axis.
:param x1,x2: arrays with data to be plotted
:param xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
for both histograms.
:param x1leg, x2leg: legends for each histogram
:param xlabel: self-explanatory.
:param bins1,bins2: number of bins in each histogram
:param fig: which plot window should I use?
:param range: in the form (xmin,xmax), same as range argument for hist and applied to both
histograms.
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(2,1,1)
if sharey==True:
b=fig.add_subplot(2,1,2, sharex=a, sharey=a)
else:
b=fig.add_subplot(2,1,2, sharex=a)
a.hist(x1,bins1,label=x1leg,color='b',histtype='stepfilled',range=range)
a.legend(loc='best',frameon=False)
a.set_xlim(xmin,xmax)
b.hist(x2,bins2,label=x2leg,color='r',histtype='stepfilled',range=range)
b.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
b.set_xlabel(xlabel)
b.set_ylabel('Number',verticalalignment='bottom')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def threehists(x1,x2,x3,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',xlabel='',fig=1,sharey=False,fontsize=12):
"""
Script that plots three histograms of quantities x1, x2 and x3
sharing the same X-axis.
Arguments:
- x1,x2,x3: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range for both histograms.
- x1leg, x2leg, x3leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- fig: which plot window should I use?
Example:
x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
>>> threehists(x1,x2,x3,38,44,'AD','JD','EHF10','$\log L_{\\rm bol}$ (erg s$^{-1}$)',sharey=True)
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(3,1,1)
if sharey==True:
b=fig.add_subplot(3,1,2, sharex=a, sharey=a)
c=fig.add_subplot(3,1,3, sharex=a, sharey=a)
else:
b=fig.add_subplot(3,1,2, sharex=a)
c=fig.add_subplot(3,1,3, sharex=a)
a.hist(x1,label=x1leg,color='b',histtype='stepfilled')
a.legend(loc='best',frameon=False)
a.set_xlim(xmin,xmax)
b.hist(x2,label=x2leg,color='r',histtype='stepfilled')
b.legend(loc='best',frameon=False)
c.hist(x3,label=x3leg,color='y',histtype='stepfilled')
c.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
c.set_xlabel(xlabel)
b.set_ylabel('Number')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def fourhists(x1,x2,x3,x4,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',x4leg='$x_4$',xlabel='',fig=1,sharey=False,fontsize=12,bins1=10,bins2=10,bins3=10,bins4=10,line1=None,line2=None,line3=None,line4=None,line1b=None,line2b=None,line3b=None,line4b=None,loc='best'):
"""
Script that plots four histograms of quantities x1, x2, x3 and x4
sharing the same X-axis.
Arguments:
- x1,x2,x3,x4: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
      for all histograms.
- x1leg, x2leg, x3leg, x4leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- bins1,bins2,...: number of bins in each histogram
- fig: which plot window should I use?
- line?: draws vertical solid lines at the positions indicated in each panel
- line?b: draws vertical dashed lines at the positions indicated in each panel
.. figure:: ../figures/fourhists.png
:scale: 100 %
:alt: Four histograms in the same figure
Four histograms in the same figure.
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(4,1,1)
if sharey==True:
b=fig.add_subplot(4,1,2, sharex=a, sharey=a)
c=fig.add_subplot(4,1,3, sharex=a, sharey=a)
d=fig.add_subplot(4,1,4, sharex=a, sharey=a)
else:
b=fig.add_subplot(4,1,2, sharex=a)
c=fig.add_subplot(4,1,3, sharex=a)
d=fig.add_subplot(4,1,4, sharex=a)
def vline(hist,value,linestyle='k'):
"""Draw vertical line"""
yax=hist.set_ylim()
hist.plot([value,value],[yax[0],yax[1]],linestyle,linewidth=2)
a.hist(x1,bins1,label=x1leg,color='b',histtype='stepfilled')
a.legend(loc=loc,frameon=False)
a.set_xlim(xmin,xmax)
if line1!=None: vline(a,line1)
if line1b!=None: vline(a,line1b,'k--')
b.hist(x2,bins2,label=x2leg,color='r',histtype='stepfilled')
b.legend(loc=loc,frameon=False)
if line2!=None: vline(b,line2)
if line2b!=None: vline(b,line2b,'k--')
c.hist(x3,bins3,label=x3leg,color='y',histtype='stepfilled')
c.legend(loc=loc,frameon=False)
if line3!=None: vline(c,line3)
if line3b!=None: vline(c,line3b,'k--')
d.hist(x4,bins4,label=x4leg,color='g',histtype='stepfilled')
d.legend(loc=loc,frameon=False)
if line4!=None: vline(d,line4)
if line4b!=None: vline(d,line4b,'k--')
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
pylab.setp(c.get_xticklabels(), visible=False)
d.set_xlabel(xlabel)
c.set_ylabel('Number')
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
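# Hedged usage sketch (not part of the original module): synthetic data; the
# line1/line1b arguments mark, e.g., a mean (solid) and an upper limit (dashed).
#
#   import numpy
#   w, x, y, z = [numpy.random.normal(i, 1., 500) for i in range(4)]
#   fourhists(w, x, y, z, -4., 8., 'w', 'x', 'y', 'z', xlabel='value',
#             line1=w.mean(), line1b=w.mean() + 1.)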
def fourcumplot(x1,x2,x3,x4,xmin,xmax,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',x4leg='$x_4$',xlabel='',ylabel='$N(x>x\')$',fig=1,sharey=False,fontsize=12,bins1=50,bins2=50,bins3=50,bins4=50):
"""
Script that plots the cumulative histograms of four variables x1, x2, x3 and x4
sharing the same X-axis. For each bin, Y is the fraction of the sample
with values above X.
Arguments:
- x1,x2,x3,x4: arrays with data to be plotted
- xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
      for all histograms.
- x1leg, x2leg, x3leg, x4leg: legends for each histogram
- xlabel: self-explanatory.
- sharey: sharing the Y-axis among the histograms?
- bins1,bins2,...: number of bins in each histogram
- fig: which plot window should I use?
Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
v1 Jun. 2012: inherited from fourhists.
"""
pylab.rcParams.update({'font.size': fontsize})
fig=pylab.figure(fig)
pylab.clf()
a=fig.add_subplot(4,1,1)
if sharey==True:
b=fig.add_subplot(4,1,2, sharex=a, sharey=a)
c=fig.add_subplot(4,1,3, sharex=a, sharey=a)
d=fig.add_subplot(4,1,4, sharex=a, sharey=a)
else:
b=fig.add_subplot(4,1,2, sharex=a)
c=fig.add_subplot(4,1,3, sharex=a)
d=fig.add_subplot(4,1,4, sharex=a)
    a.hist(x1,bins1,label=x1leg,color='b',cumulative=-1,density=True,histtype='stepfilled')
    a.legend(loc='best',frameon=False)
    a.set_xlim(xmin,xmax)
    b.hist(x2,bins2,label=x2leg,color='r',cumulative=-1,density=True,histtype='stepfilled')
    b.legend(loc='best',frameon=False)
    c.hist(x3,bins3,label=x3leg,color='y',cumulative=-1,density=True,histtype='stepfilled')
    c.legend(loc='best',frameon=False)
    d.hist(x4,bins4,label=x4leg,color='g',cumulative=-1,density=True,histtype='stepfilled')
d.legend(loc='best',frameon=False)
pylab.setp(a.get_xticklabels(), visible=False)
pylab.setp(b.get_xticklabels(), visible=False)
pylab.setp(c.get_xticklabels(), visible=False)
d.set_xlabel(xlabel)
c.set_ylabel(ylabel)
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def threehistsx(x1,x2,x3,x1leg='$x_1$',x2leg='$x_2$',x3leg='$x_3$',fig=1,fontsize=12,bins1=10,bins2=10,bins3=10):
"""
Script that pretty-plots three histograms of quantities x1, x2 and x3.
Arguments:
:param x1,x2,x3: arrays with data to be plotted
:param x1leg, x2leg, x3leg: legends for each histogram
:param fig: which plot window should I use?
Example:
x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
    >>> threehistsx(x1,x2,x3,'AD','JD','EHF10')
Inspired by http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label.
"""
pylab.rcParams.update({'font.size': fontsize})
pylab.figure(fig)
pylab.clf()
pylab.subplot(3,1,1)
pylab.hist(x1,label=x1leg,color='b',bins=bins1)
pylab.legend(loc='best',frameon=False)
pylab.subplot(3,1,2)
pylab.hist(x2,label=x2leg,color='r',bins=bins2)
pylab.legend(loc='best',frameon=False)
pylab.subplot(3,1,3)
pylab.hist(x3,label=x3leg,color='y',bins=bins3)
pylab.legend(loc='best',frameon=False)
pylab.minorticks_on()
pylab.subplots_adjust(hspace=0.15)
pylab.draw()
pylab.show()
def fitconf(xdata,ydata,errx,erry,covxy,nboot=1000,bcesMethod='ort',linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the input data, performs the BCES
    fit, gets the orthogonal parameters and plots the best-fit line and
    confidence band (generated using analytical methods). I decided to put
    these commands together in a method because I have been using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitconf(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
import bces.bces
from . import stats
from . import misc
# Selects the desired BCES method
i=misc.whichbces(bcesMethod)
# Performs the BCES fit
a,b,erra,errb,cov=bces.bces.bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=stats.confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(xcb, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
return a,b,erra,errb,cov
def fitconfmc(xdata,ydata,errx,erry,covxy,nboot=1000,bcesMethod='ort',linestyle='',conf=1.,confcolor='gray',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the input data, performs the BCES
    fit, gets the orthogonal parameters and plots the best-fit line and
    confidence band (generated using MC). I decided to put these commands
    together in a method because I have been using them very frequently.
Assumes you initialized the plot window before calling this method.
This method is more stable than fitconf, which is plagued with numerical
instabilities when computing the gradient.
Usage:
    >>> a1,b1,erra1,errb1,cov1=nemmen.fitconfmc(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
- conf: size of confidence band to be plotted in standard deviations
"""
    import bces.bces
    from . import misc
    from .stats import confbandmc
# Selects the desired BCES method
i=misc.whichbces(bcesMethod)
# Performs the BCES fit
a,b,erra,errb,cov=bces.bces.bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
# Plots confidence band
lcb,ucb,y=confbandmc(x,fitm,covm,10000,conf)
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(x, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
return a,b,erra,errb,cov
def plotlinfit(xdata,ydata,a,b,erra,errb,cov,linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the output of a linear regression
    method (for example, bayeslin.pro, the Bayesian linear regression method
    of Kelly 2007), plots the fit and the confidence bands.
The input is:
X, Y, slope (A), errA, intercept (B), errB and cov(A,B)
Assumes you initialized the plot window before calling this method.
Usage:
>>> nemmen.plotlinfit(x,y,a,b,erra,errb,covab,linestyle='k',confcolor='LightGrey')
Explanation of some arguments:
- xplot: if provided, will compute the confidence band in the X-values provided
with xplot
- front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
    from .stats import confbandnl
    # Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a*x+b,linestyle,**args)
fitm=numpy.array([ a,b ]) # array with best-fit parameters
covm=numpy.array([ (erra**2,cov), (cov,errb**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
if front==True:
zorder=10
else:
zorder=None
pylab.fill_between(xcb, lcb, ucb, alpha=0.3, facecolor=confcolor, zorder=zorder)
def jh(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.683,confcolor='gray',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the input data, performs the BCES
    fit, gets the orthogonal parameters, the best-fit line and the
    confidence band, then returns the points corresponding to the line and
    confidence band.
I wrote this for the John Hunter plotting contest, in order to simplify
my AGN-GRB plot. Inherited from method fitconf.
Usage:
    >>> x,y,lcb,ucb=nemmen.jh(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
where y are the line points, lcb and ucb are the lower and upper confidence band
points.
:param xplot: if provided, will compute the confidence band in the X-values provided
with xplot
:param front: if True, then will plot the confidence band in front of the data
points; otherwise, will plot it behind the points
"""
    from bces.bces import bcesp
    from .misc import whichbces
    from .stats import confbandnl
    # Selects the desired BCES method
i=whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
y=a[i]*x+b[i]
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
# Plots confidence band
lcb,ucb,xcb=confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
return x,y,lcb,ucb
def fitconfpred(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.68,confcolor='LightGrey',predcolor='Khaki',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the input data, performs the BCES
    fit, gets the orthogonal parameters and plots (i) the best-fit line,
    (ii) the confidence band and (iii) the prediction band.
    I decided to put these commands together in a method because I have been
    using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitconfpred(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',confcolor='LightGrey')
"""
    from bces.bces import bcesp
    from .misc import whichbces
    from .stats import confbandnl, predbandnl
    # Selects the desired BCES method
i=whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
if front==True:
zorder=10
else:
zorder=None
# Plots prediction band
lpb,upb,xpb=predbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
pylab.fill_between(xpb, lpb, upb, facecolor=predcolor,edgecolor='', zorder=zorder)
# Plots confidence band
lcb,ucb,xcb=confbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
pylab.fill_between(xcb, lcb, ucb, facecolor=confcolor,edgecolor='', zorder=zorder)
return a,b,erra,errb,cov
def fitpred(xdata,ydata,errx,erry,covxy,nboot=1000,bces='ort',linestyle='',conf=0.68,predcolor='Khaki',xplot=None,front=False,**args):
"""
    This is a wrapper that, given the input data, performs the BCES
    fit, gets the orthogonal parameters and plots (i) the best-fit line and
    (ii) the prediction band.
    I decided to put these commands together in a method because I have been
    using them very frequently.
Assumes you initialized the plot window before calling this method.
Usage:
>>> a1,b1,erra1,errb1,cov1=nemmen.fitpred(x[i],y[i],errx[i],erry[i],covxy[i],nboot,bces,linestyle='k',predcolor='LightGrey')
"""
    from bces.bces import bcesp
    from .misc import whichbces
    from .stats import predbandnl
    # Selects the desired BCES method
i=whichbces(bces)
# Performs the BCES fit
a,b,erra,errb,cov=bcesp(xdata,errx,ydata,erry,covxy,nboot)
# Plots best-fit
    if xplot is None:
x=numpy.linspace(xdata.min(),xdata.max(),100)
else:
x=xplot
pylab.plot(x,a[i]*x+b[i],linestyle,**args)
fitm=numpy.array([ a[i],b[i] ]) # array with best-fit parameters
covm=numpy.array([ (erra[i]**2,cov[i]), (cov[i],errb[i]**2) ]) # covariance matrix
def func(x): return x[1]*x[0]+x[2]
if front==True:
zorder=10
else:
zorder=None
# Plots prediction band
lpb,upb,xpb=predbandnl(xdata,ydata,func,fitm,covm,2,conf,x)
pylab.fill_between(xpb, lpb, upb, facecolor=predcolor,edgecolor='', zorder=zorder)
return a,b,erra,errb,cov
def uerrorbar(ux,uy,**args):
"""
Adaptation of pylab.errorbar to work with arrays defined using the
uncertainties package, which include the errorbars built-in.
Usage:
>>> uerrorbar(x,y,fmt='o')
will plot the points and error bars associated with the 'unumpy'
arrays x and y
"""
    from uncertainties import unumpy
    x=unumpy.nominal_values(ux)
y=unumpy.nominal_values(uy)
errx=unumpy.std_devs(ux)
erry=unumpy.std_devs(uy)
pylab.errorbar(x,y,xerr=errx,yerr=erry,**args)
def text(x,y,s,**args):
"""
Version of pylab.text that can be applied to arrays.
Usage:
>>> text(x,y,s, fontsize=10)
will plot the strings in array 's' at coordinates given by arrays
'x' and 'y'.
"""
for j in range(x.size):
pylab.text(x[j],y[j],s[j], **args)
def ipyplots():
"""
Makes sure we have exactly the same matplotlib settings as in the IPython terminal
version. Call this from IPython notebook.
`Source <http://stackoverflow.com/questions/16905028/why-is-matplotlib-plot-produced-from-ipython-notebook-slightly-different-from-te)>`_.
"""
pylab.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)
pylab.rcParams['font.size']=12 #10
pylab.rcParams['savefig.dpi']=100 #72
pylab.rcParams['figure.subplot.bottom']=.1 #.125
def make_cmap(colors, position=None, bit=False):
'''
make_cmap takes a list of tuples which contain RGB values. The RGB
values may either be in 8-bit [0 to 255] (in which bit must be set to
True when called) or arithmetic [0 to 1] (default). make_cmap returns
a cmap with equally spaced colors.
Arrange your tuples so that the first color is the lowest value for the
colorbar and the last is the highest.
position contains values from 0 to 1 to dictate the location of each color.
source: http://schubert.atmos.colostate.edu/~cslocum/custom_cmap.html
Chris Slocum, Colorado State University
'''
    import sys
    import matplotlib as mpl
    import numpy as np
    bit_rgb = np.linspace(0,1,256)
    if position is None:
position = np.linspace(0,1,len(colors))
else:
if len(position) != len(colors):
sys.exit("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
sys.exit("position must start with 0 and end with 1")
if bit:
for i in range(len(colors)):
colors[i] = (bit_rgb[colors[i][0]],
bit_rgb[colors[i][1]],
bit_rgb[colors[i][2]])
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(position, colors):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return cmap
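# Hedged usage sketch (not part of the original module): builds a simple
# blue -> white -> red colormap from 8-bit RGB tuples and applies it.
#
#   mycmap = make_cmap([(0, 0, 255), (255, 255, 255), (255, 0, 0)], bit=True)
#   pylab.imshow(numpy.random.rand(32, 32), cmap=mycmap)
#   pylab.colorbar()
#   pylab.show()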
def image(Z,xnew,ynew,my_cmap=None,aspect='equal'):
"""
Creates pretty image. You need to specify:
"""
imshow(log10(Z),extent=[xnew[0],xnew[-1],ynew[0],ynew[-1]], cmap=my_cmap)
pylab.axes().set_aspect('equal')
colorbar()
circle2=Circle((0,0),1,color='k')
gca().add_artist(circle2)
savefig('tmp.png',transparent=True,dpi=150)
def wolframcmap():
"""
Returns colormap that matches closely the one used by default
for images in Wolfram Mathematica 11 (dark blue to orange).
I spent one hour playing around to reproduce it.
Usage:
>>> mycmap=nmmn.plots.wolframcmap()
>>> imshow(rho, cmap=mycmap)
.. figure:: ../figures/wolframcmap.png
:scale: 100 %
:alt: Image plotted using Wolfram's colormap
Image plotted using Wolfram's colormap.
"""
# Create a list of RGB tuples, recreates Mathematica colormap
colors3=[(51,91,150),(111,116,143),(167,136,110),(233,167,85),(251,212,141),(255,247,190)]
# Call the function make_cmap which returns your colormap
return make_cmap(colors3, bit=True)
def parulacmap():
"""
Creates the beautiful Parula colormap which is Matlab's default.
Usage:
>>> mycmap=nmmn.plots.parulacmap()
>>> imshow(rho, cmap=mycmap)
Code taken from `here <https://github.com/BIDS/colormap/blob/master/parula.py>`_
"""
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905],
[0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143],
[0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952,
0.779247619], [0.1252714286, 0.3242428571, 0.8302714286],
[0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238,
0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571],
[0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571,
0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429],
[0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667,
0.8467], [0.0779428571, 0.5039857143, 0.8383714286],
[0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571,
0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429],
[0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524,
0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048,
0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667],
[0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381,
0.7607190476], [0.0383714286, 0.6742714286, 0.743552381],
[0.0589714286, 0.6837571429, 0.7253857143],
[0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429],
[0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429,
0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048],
[0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619,
0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667],
[0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524,
0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905],
[0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476,
0.4493904762], [0.609852381, 0.7473142857, 0.4336857143],
[0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333],
[0.7184095238, 0.7411333333, 0.3904761905],
[0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667,
0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762],
[0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217],
[0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857,
0.2886428571], [0.9738952381, 0.7313952381, 0.266647619],
[0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857,
0.2164142857], [0.9955333333, 0.7860571429, 0.196652381],
[0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857],
[0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309],
[0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333,
0.0948380952], [0.9661, 0.9514428571, 0.0755333333],
[0.9763, 0.9831, 0.0538]]
return LinearSegmentedColormap.from_list('parula', cm_data)
def turbocmap():
"""
Returns the Turbo colormap: an improved version of the awful jet colormap.
The look-up table contains 256 entries. Each entry is a floating point sRGB triplet.
Usage:
>>> turbo=nmmn.plots.turbocmap()
>>> imshow(rho, cmap=turbo)
Copyright 2019 Google LLC.
SPDX-License-Identifier: Apache-2.0
Author: Anton Mikhailov
References:
- `turbo colormap array <https://gist.github.com/mikhailov-work/ee72ba4191942acecc03fe6da94fc73f>`_
- Google AI `blog post <https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html>`_ describing the advantages of the colormap
"""
from matplotlib.colors import ListedColormap
turbo_colormap_data = [[0.18995,0.07176,0.23217],[0.19483,0.08339,0.26149],[0.19956,0.09498,0.29024],[0.20415,0.10652,0.31844],[0.20860,0.11802,0.34607],[0.21291,0.12947,0.37314],[0.21708,0.14087,0.39964],[0.22111,0.15223,0.42558],[0.22500,0.16354,0.45096],[0.22875,0.17481,0.47578],[0.23236,0.18603,0.50004],[0.23582,0.19720,0.52373],[0.23915,0.20833,0.54686],[0.24234,0.21941,0.56942],[0.24539,0.23044,0.59142],[0.24830,0.24143,0.61286],[0.25107,0.25237,0.63374],[0.25369,0.26327,0.65406],[0.25618,0.27412,0.67381],[0.25853,0.28492,0.69300],[0.26074,0.29568,0.71162],[0.26280,0.30639,0.72968],[0.26473,0.31706,0.74718],[0.26652,0.32768,0.76412],[0.26816,0.33825,0.78050],[0.26967,0.34878,0.79631],[0.27103,0.35926,0.81156],[0.27226,0.36970,0.82624],[0.27334,0.38008,0.84037],[0.27429,0.39043,0.85393],[0.27509,0.40072,0.86692],[0.27576,0.41097,0.87936],[0.27628,0.42118,0.89123],[0.27667,0.43134,0.90254],[0.27691,0.44145,0.91328],[0.27701,0.45152,0.92347],[0.27698,0.46153,0.93309],[0.27680,0.47151,0.94214],[0.27648,0.48144,0.95064],[0.27603,0.49132,0.95857],[0.27543,0.50115,0.96594],[0.27469,0.51094,0.97275],[0.27381,0.52069,0.97899],[0.27273,0.53040,0.98461],[0.27106,0.54015,0.98930],[0.26878,0.54995,0.99303],[0.26592,0.55979,0.99583],[0.26252,0.56967,0.99773],[0.25862,0.57958,0.99876],[0.25425,0.58950,0.99896],[0.24946,0.59943,0.99835],[0.24427,0.60937,0.99697],[0.23874,0.61931,0.99485],[0.23288,0.62923,0.99202],[0.22676,0.63913,0.98851],[0.22039,0.64901,0.98436],[0.21382,0.65886,0.97959],[0.20708,0.66866,0.97423],[0.20021,0.67842,0.96833],[0.19326,0.68812,0.96190],[0.18625,0.69775,0.95498],[0.17923,0.70732,0.94761],[0.17223,0.71680,0.93981],[0.16529,0.72620,0.93161],[0.15844,0.73551,0.92305],[0.15173,0.74472,0.91416],[0.14519,0.75381,0.90496],[0.13886,0.76279,0.89550],[0.13278,0.77165,0.88580],[0.12698,0.78037,0.87590],[0.12151,0.78896,0.86581],[0.11639,0.79740,0.85559],[0.11167,0.80569,0.84525],[0.10738,0.81381,0.83484],[0.10357,0.82177,0.82437],[0.10026,0.82955,0.81389],[0.09750,0.83714,0.80342],[0.09532,0.84455,0.79299],[0.09377,0.85175,0.78264],[0.09287,0.85875,0.77240],[0.09267,0.86554,0.76230],[0.09320,0.87211,0.75237],[0.09451,0.87844,0.74265],[0.09662,0.88454,0.73316],[0.09958,0.89040,0.72393],[0.10342,0.89600,0.71500],[0.10815,0.90142,0.70599],[0.11374,0.90673,0.69651],[0.12014,0.91193,0.68660],[0.12733,0.91701,0.67627],[0.13526,0.92197,0.66556],[0.14391,0.92680,0.65448],[0.15323,0.93151,0.64308],[0.16319,0.93609,0.63137],[0.17377,0.94053,0.61938],[0.18491,0.94484,0.60713],[0.19659,0.94901,0.59466],[0.20877,0.95304,0.58199],[0.22142,0.95692,0.56914],[0.23449,0.96065,0.55614],[0.24797,0.96423,0.54303],[0.26180,0.96765,0.52981],[0.27597,0.97092,0.51653],[0.29042,0.97403,0.50321],[0.30513,0.97697,0.48987],[0.32006,0.97974,0.47654],[0.33517,0.98234,0.46325],[0.35043,0.98477,0.45002],[0.36581,0.98702,0.43688],[0.38127,0.98909,0.42386],[0.39678,0.99098,0.41098],[0.41229,0.99268,0.39826],[0.42778,0.99419,0.38575],[0.44321,0.99551,0.37345],[0.45854,0.99663,0.36140],[0.47375,0.99755,0.34963],[0.48879,0.99828,0.33816],[0.50362,0.99879,0.32701],[0.51822,0.99910,0.31622],[0.53255,0.99919,0.30581],[0.54658,0.99907,0.29581],[0.56026,0.99873,0.28623],[0.57357,0.99817,0.27712],[0.58646,0.99739,0.26849],[0.59891,0.99638,0.26038],[0.61088,0.99514,0.25280],[0.62233,0.99366,0.24579],[0.63323,0.99195,0.23937],[0.64362,0.98999,0.23356],[0.65394,0.98775,0.22835],[0.66428,0.98524,0.22370],[0.67462,0.98246,0.21960],[0.68494,0.97941,0.21602],[0.69525,0.97610,0.21294],[0.70553,0.97255,0.21032],[0.71577,0.96875,0.208
15],[0.72596,0.96470,0.20640],[0.73610,0.96043,0.20504],[0.74617,0.95593,0.20406],[0.75617,0.95121,0.20343],[0.76608,0.94627,0.20311],[0.77591,0.94113,0.20310],[0.78563,0.93579,0.20336],[0.79524,0.93025,0.20386],[0.80473,0.92452,0.20459],[0.81410,0.91861,0.20552],[0.82333,0.91253,0.20663],[0.83241,0.90627,0.20788],[0.84133,0.89986,0.20926],[0.85010,0.89328,0.21074],[0.85868,0.88655,0.21230],[0.86709,0.87968,0.21391],[0.87530,0.87267,0.21555],[0.88331,0.86553,0.21719],[0.89112,0.85826,0.21880],[0.89870,0.85087,0.22038],[0.90605,0.84337,0.22188],[0.91317,0.83576,0.22328],[0.92004,0.82806,0.22456],[0.92666,0.82025,0.22570],[0.93301,0.81236,0.22667],[0.93909,0.80439,0.22744],[0.94489,0.79634,0.22800],[0.95039,0.78823,0.22831],[0.95560,0.78005,0.22836],[0.96049,0.77181,0.22811],[0.96507,0.76352,0.22754],[0.96931,0.75519,0.22663],[0.97323,0.74682,0.22536],[0.97679,0.73842,0.22369],[0.98000,0.73000,0.22161],[0.98289,0.72140,0.21918],[0.98549,0.71250,0.21650],[0.98781,0.70330,0.21358],[0.98986,0.69382,0.21043],[0.99163,0.68408,0.20706],[0.99314,0.67408,0.20348],[0.99438,0.66386,0.19971],[0.99535,0.65341,0.19577],[0.99607,0.64277,0.19165],[0.99654,0.63193,0.18738],[0.99675,0.62093,0.18297],[0.99672,0.60977,0.17842],[0.99644,0.59846,0.17376],[0.99593,0.58703,0.16899],[0.99517,0.57549,0.16412],[0.99419,0.56386,0.15918],[0.99297,0.55214,0.15417],[0.99153,0.54036,0.14910],[0.98987,0.52854,0.14398],[0.98799,0.51667,0.13883],[0.98590,0.50479,0.13367],[0.98360,0.49291,0.12849],[0.98108,0.48104,0.12332],[0.97837,0.46920,0.11817],[0.97545,0.45740,0.11305],[0.97234,0.44565,0.10797],[0.96904,0.43399,0.10294],[0.96555,0.42241,0.09798],[0.96187,0.41093,0.09310],[0.95801,0.39958,0.08831],[0.95398,0.38836,0.08362],[0.94977,0.37729,0.07905],[0.94538,0.36638,0.07461],[0.94084,0.35566,0.07031],[0.93612,0.34513,0.06616],[0.93125,0.33482,0.06218],[0.92623,0.32473,0.05837],[0.92105,0.31489,0.05475],[0.91572,0.30530,0.05134],[0.91024,0.29599,0.04814],[0.90463,0.28696,0.04516],[0.89888,0.27824,0.04243],[0.89298,0.26981,0.03993],[0.88691,0.26152,0.03753],[0.88066,0.25334,0.03521],[0.87422,0.24526,0.03297],[0.86760,0.23730,0.03082],[0.86079,0.22945,0.02875],[0.85380,0.22170,0.02677],[0.84662,0.21407,0.02487],[0.83926,0.20654,0.02305],[0.83172,0.19912,0.02131],[0.82399,0.19182,0.01966],[0.81608,0.18462,0.01809],[0.80799,0.17753,0.01660],[0.79971,0.17055,0.01520],[0.79125,0.16368,0.01387],[0.78260,0.15693,0.01264],[0.77377,0.15028,0.01148],[0.76476,0.14374,0.01041],[0.75556,0.13731,0.00942],[0.74617,0.13098,0.00851],[0.73661,0.12477,0.00769],[0.72686,0.11867,0.00695],[0.71692,0.11268,0.00629],[0.70680,0.10680,0.00571],[0.69650,0.10102,0.00522],[0.68602,0.09536,0.00481],[0.67535,0.08980,0.00449],[0.66449,0.08436,0.00424],[0.65345,0.07902,0.00408],[0.64223,0.07380,0.00401],[0.63082,0.06868,0.00401],[0.61923,0.06367,0.00410],[0.60746,0.05878,0.00427],[0.59550,0.05399,0.00453],[0.58336,0.04931,0.00486],[0.57103,0.04474,0.00529],[0.55852,0.04028,0.00579],[0.54583,0.03593,0.00638],[0.53295,0.03169,0.00705],[0.51989,0.02756,0.00780],[0.50664,0.02354,0.00863],[0.49321,0.01963,0.00955],[0.47960,0.01583,0.01055]]
return ListedColormap(turbo_colormap_data)
def jointplot(X,Y,xlabel=None,ylabel=None,binsim=40,binsh=20,contour=True):
"""
Plots the joint distribution of posteriors for X1 and X2, including the 1D
histograms showing the median and standard deviations.
The work that went in creating this nice method is shown, step by step, in
the ipython notebook "error contours.ipynb". Sources of inspiration:
- http://python4mpia.github.io/intro/quick-tour.html
- http://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
Usage:
>>> jointplot(M.rtr.trace(),M.mdot.trace(),xlabel='$\log \ r_{\\rm tr}$', ylabel='$\log \ \dot{m}$')
gives the following plot.
.. figure:: ../figures/jointplot.png
:scale: 100 %
:alt: Two-dimensional kernel density distribution.
Two-dimensional kernel density distribution, along with one-dimensional histograms of each distribution.
"""
import scipy.stats
# Generates 2D histogram for image
    histt, xt, yt = numpy.histogram2d(X, Y, bins=[binsim,binsim])
histt = numpy.transpose(histt) # Beware: numpy switches axes, so switch back.
# assigns correct proportions to subplots
fig=pylab.figure()
gs = pylab.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3], wspace=0.001, hspace=0.001)
con=pylab.subplot(gs[2])
histx=pylab.subplot(gs[0], sharex=con)
histy=pylab.subplot(gs[3], sharey=con)
# Image
con.imshow(histt,extent=[xt[0],xt[-1], yt[0],yt[-1]],origin='lower',cmap=pylab.cm.gray_r,aspect='auto')
# Overplot with error contours 1,2 sigma
if contour==True:
pdf = scipy.stats.gaussian_kde([X, Y])
x,y = pylab.meshgrid(xt,yt)
z = numpy.array(pdf.evaluate([x.flatten(),y.flatten()])).reshape(x.shape)
# the [61,15] values were obtained by trial and error until the joint confidence
# contours matched the confidence intervals from the individual X,Y
s=scipy.stats.scoreatpercentile(pdf(pdf.resample(1000)), [61,15])
cs=con.contour(x,y,z, levels=s, extent=[x[0],x[-1], y[0],y[-1]], linestyles=['-','-','-'], colors=['black','blue'])
# use dictionary in order to assign your own labels to the contours.
#fmtdict = {s[0]:r'$1\sigma$',s[1]:r'$2\sigma$'}
#con.clabel(cs, fmt=fmtdict, inline=True, fontsize=20)
if xlabel!=None: con.set_xlabel(xlabel)
if ylabel!=None: con.set_ylabel(ylabel)
# X-axis histogram
histx.hist(X, binsh, histtype='stepfilled',facecolor='lightblue')
pylab.setp(histx.get_xticklabels(), visible=False) # no X label
pylab.setp(histx.get_yticklabels(), visible=False) # no Y label
# Vertical lines with median and 1sigma confidence
yax=histx.set_ylim()
histx.plot([numpy.median(X),numpy.median(X)],[yax[0],yax[1]],'k-',linewidth=2) # median
xsd=scipy.stats.scoreatpercentile(X, [15.87,84.13])
histx.plot([xsd[0],xsd[0]],[yax[0],yax[1]],'k--') # -1sd
histx.plot([xsd[-1],xsd[-1]],[yax[0],yax[1]],'k--') # +1sd
# Y-axis histogram
histy.hist(Y, binsh, histtype='stepfilled', orientation='horizontal',facecolor='lightyellow')
pylab.setp(histy.get_yticklabels(), visible=False) # no Y label
pylab.setp(histy.get_xticklabels(), visible=False) # no X label
# Vertical lines with median and 1sigma confidence
xax=histy.set_xlim()
histy.plot([xax[0],xax[1]],[numpy.median(Y),numpy.median(Y)],'k-',linewidth=2) # median
ysd=scipy.stats.scoreatpercentile(Y, [15.87,84.13])
histy.plot([xax[0],xax[1]],[ysd[0],ysd[0]],'k--') # -1sd
histy.plot([xax[0],xax[1]],[ysd[-1],ysd[-1]],'k--') # +1sd
def symlog(x, C=1./numpy.log(10.)):
"""
Applies a modified logarithm function to x that handles negative
values while maintaining continuity across
zero. This function solves a very concrete problem: how to handle
    data that spans a huge range and also has negative values? log10
will fail. This is the answer.
The transformation is defined in an article from the journal
Measurement Science and Technology (Webber, 2012):
y = sign(x)*(log10(1+abs(x)/(10^C)))
where the scaling constant C determines the resolution of the data
around zero. The smallest order of magnitude shown on either side of
zero will be 10^ceil(C).
Reference: MATHWORKS symlog <https://www.mathworks.com/matlabcentral/fileexchange/57902-symlog>
"""
return numpy.sign(x)*(numpy.log10(1+numpy.abs(x)/(10**C)))
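# Hedged usage sketch (not part of the original module): symlog keeps sign
# information while compressing data that spans many decades.
#
#   x = numpy.array([-1e4, -1., 0., 1., 1e4])
#   symlog(x)   # approx. [-3.57, -0.14, 0., 0.14, 3.57] with the default C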
| 38.579487
| 6,680
| 0.707298
|
dec2d3d174d005b1c74a52b2d5857caff10c6c8f
| 3,178
|
py
|
Python
|
zentral/contrib/santa/urls.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 634
|
2015-10-30T00:55:40.000Z
|
2022-03-31T02:59:00.000Z
|
zentral/contrib/santa/urls.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 145
|
2015-11-06T00:17:33.000Z
|
2022-03-16T13:30:31.000Z
|
zentral/contrib/santa/urls.py
|
arubdesu/zentral
|
ac0fe663f6e1c27f9a9f55a7500a87e6ac7d9190
|
[
"Apache-2.0"
] | 103
|
2015-11-07T07:08:49.000Z
|
2022-03-18T17:34:36.000Z
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from . import views
app_name = "santa"
urlpatterns = [
# configuration / enrollment
url(r'^configurations/$',
views.ConfigurationListView.as_view(),
name='configuration_list'),
url(r'^configurations/create/$',
views.CreateConfigurationView.as_view(),
name='create_configuration'),
url(r'^configurations/(?P<pk>\d+)/$',
views.ConfigurationView.as_view(),
name='configuration'),
url(r'^configurations/(?P<pk>\d+)/update/$',
views.UpdateConfigurationView.as_view(),
name='update_configuration'),
url(r'^configurations/(?P<pk>\d+)/enrollments/create/$',
views.CreateEnrollmentView.as_view(),
name='create_enrollment'),
url(r'^configurations/(?P<configuration_pk>\d+)/enrollments/(?P<pk>\d+)/configuration_plist/$',
views.EnrollmentConfigurationView.as_view(response_type="plist"),
name='enrollment_configuration_plist'),
url(r'^configurations/(?P<configuration_pk>\d+)/enrollments/(?P<pk>\d+)/configuration_profile/$',
views.EnrollmentConfigurationView.as_view(response_type="configuration_profile"),
name='enrollment_configuration_profile'),
# rules
url(r'^configurations/(?P<configuration_pk>\d+)/rules/$',
views.ConfigurationRulesView.as_view(),
name='configuration_rules'),
url(r'^configurations/(?P<configuration_pk>\d+)/rules/create/$',
views.CreateConfigurationRuleView.as_view(),
name='create_configuration_rule'),
url(r'^configurations/(?P<configuration_pk>\d+)/rules/(?P<pk>\d+)/update/$',
views.UpdateConfigurationRuleView.as_view(),
name='update_configuration_rule'),
url(r'^configurations/(?P<configuration_pk>\d+)/rules/(?P<pk>\d+)/delete/$',
views.DeleteConfigurationRuleView.as_view(),
name='delete_configuration_rule'),
    url(r'^configurations/(?P<configuration_pk>\d+)/rules/pick_binary/$',
views.PickRuleBinaryView.as_view(),
name='pick_rule_binary'),
    url(r'^configurations/(?P<configuration_pk>\d+)/rules/pick_bundle/$',
views.PickRuleBundleView.as_view(),
name='pick_rule_bundle'),
    url(r'^configurations/(?P<configuration_pk>\d+)/rules/pick_certificate/$',
views.PickRuleCertificateView.as_view(),
name='pick_rule_certificate'),
# API endpoints
url(r'^sync/(?P<enrollment_secret>\S+)/preflight/(?P<machine_id>\S+)$',
csrf_exempt(views.PreflightView.as_view()), name='preflight'),
url(r'^sync/(?P<enrollment_secret>\S+)/ruledownload/(?P<machine_id>\S+)$',
csrf_exempt(views.RuleDownloadView.as_view()), name='ruledownload'),
url(r'^sync/(?P<enrollment_secret>\S+)/eventupload/(?P<machine_id>\S+)$',
csrf_exempt(views.EventUploadView.as_view()), name='eventupload'),
url(r'^sync/(?P<enrollment_secret>\S+)/postflight/(?P<machine_id>\S+)$',
csrf_exempt(views.PostflightView.as_view()), name='postflight'),
]
setup_menu_cfg = {
'items': (
('configuration_list', 'Configurations', False, ('santa.view_configuration',)),
)
}
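# Hedged usage sketch (not part of the original module): illustrates resolving
# the named routes above with Django's reverse(); paths are relative to
# wherever this urlconf is included, and the pk values are placeholders.
#
#   from django.urls import reverse
#   reverse("santa:configuration", kwargs={"pk": 1})
#   reverse("santa:create_configuration_rule", kwargs={"configuration_pk": 1})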
| 45.4
| 101
| 0.677785
|
8c229dedab1622cbfd167b77297e10503a4b381e
| 7,518
|
py
|
Python
|
examples/kubernetes/scripts/hmem_experiments/workload_runner.py
|
Creatone/workload-collocation-agent
|
826a2ae67fd23feef74e18d27c0e677f83994add
|
[
"Apache-2.0"
] | null | null | null |
examples/kubernetes/scripts/hmem_experiments/workload_runner.py
|
Creatone/workload-collocation-agent
|
826a2ae67fd23feef74e18d27c0e677f83994add
|
[
"Apache-2.0"
] | 1
|
2021-02-23T19:19:47.000Z
|
2021-02-23T19:19:47.000Z
|
examples/kubernetes/scripts/hmem_experiments/workload_runner.py
|
Creatone/workload-collocation-agent
|
826a2ae67fd23feef74e18d27c0e677f83994add
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import subprocess
from dataclasses import dataclass
from typing import Dict
from runner import default_shell_run, annotate
from kernel_parameters import set_numa_balancing, set_toptier_scale_factor
from time import sleep, time
from scenarios import Scenario, ExperimentType
EXPERIMENT_DESCRIPTION = {
ExperimentType.DRAM: 'workloads running exclusively on dram',
ExperimentType.PMEM: 'workloads running exclusively on pmem',
ExperimentType.HMEM_NUMA_BALANCING: 'workloads running on dram and pmem '
'with numa balancing turned on',
ExperimentType.HMEM_NO_NUMA_BALANCING: 'workloads running on dram and pmem '
'with numa balancing turned off',
ExperimentType.COLD_START: 'workload starts to run on pmem and after set time passes '
'can move to dram if necessary for workload performance',
ExperimentType.TOPTIER: 'workloads have toptier limit; if limit is exceeded some of the '
'memory from dram goes to pmem',
ExperimentType.TOPTIER_WITH_COLDSTART: 'workload starts to run on pmem and after set time '
'passes can move to dram if necessary for workload '
'performance; workloads have toptier limit; if '
'limit is exceeded some of the memory from dram '
'goes to pmem'
}
@dataclass
class Experiment:
name: str
number_of_workloads: Dict[str, int]
type: ExperimentType
description: str
start_timestamp: float = None
stop_timestamp: float = None
@dataclass
class ExperimentConfiguration:
numa_balancing: bool
toptier_scale_factor: str = '2000'
ONLY_NUMA_BALANCING_CONF = ExperimentConfiguration(numa_balancing=True)
NUMA_BALANCING_OFF_CONF = ExperimentConfiguration(numa_balancing=False)
TOPTIER_CONF = ExperimentConfiguration(numa_balancing=True, toptier_scale_factor='10000')
EXPERIMENT_CONFS = {ExperimentType.DRAM: NUMA_BALANCING_OFF_CONF,
ExperimentType.PMEM: NUMA_BALANCING_OFF_CONF,
ExperimentType.HMEM_NUMA_BALANCING: ONLY_NUMA_BALANCING_CONF,
ExperimentType.HMEM_NO_NUMA_BALANCING: ExperimentConfiguration(
numa_balancing=False),
ExperimentType.COLD_START: ONLY_NUMA_BALANCING_CONF,
ExperimentType.TOPTIER: TOPTIER_CONF,
ExperimentType.TOPTIER_WITH_COLDSTART: TOPTIER_CONF}
def experiment_to_json(experiment: Experiment, output_file: str):
experiment_dict = {'meta':
{'name': experiment.name,
'description': EXPERIMENT_DESCRIPTION[experiment.type],
'params': {
'workloads_count': experiment.number_of_workloads,
'type': experiment.type.value,
}
},
'experiment': {
'description': experiment.description,
'start': experiment.start_timestamp,
'end': experiment.stop_timestamp
}
}
with open(output_file, 'w+') as experiment_json_file:
json.dump(experiment_dict, experiment_json_file)
def _scale_workload(workload_name, number_of_workloads=1):
cmd_scale = "kubectl scale sts {} --replicas={}".format(
workload_name, number_of_workloads)
default_shell_run(cmd_scale)
def _set_configuration(configuration: ExperimentConfiguration):
set_numa_balancing(configuration.numa_balancing)
set_toptier_scale_factor(configuration.toptier_scale_factor)
def _run_workloads(number_of_workloads: Dict,
sleep_duration: int,
reset_workload=True):
for workload_name in number_of_workloads.keys():
_scale_workload(workload_name, number_of_workloads[workload_name])
sleep(sleep_duration)
if reset_workload:
for workload_name in number_of_workloads.keys():
_scale_workload(workload_name, 0)
def run_experiment(scenario: Scenario, number_of_workloads):
# making sure that toptier limit value is as should be
# even if previous toptier limit experiment was stopped before
# restoring the old value
if not scenario.modify_toptier_limit and (
scenario.experiment_type == ExperimentType.TOPTIER_WITH_COLDSTART or
scenario.experiment_type == ExperimentType.TOPTIER):
for workload_name in scenario.workloads_count.keys():
base_toptier_value = get_base_toptier_limit(workload_name)
patch_toptier_limit(workload_name, base_toptier_value)
_set_configuration(EXPERIMENT_CONFS[scenario.experiment_type])
    for workload_name, toptier_value in scenario.modify_toptier_limit.items():
        patch_toptier_limit(workload_name, toptier_value)
start_timestamp = time()
annotate('Running experiment: {}'.format(scenario.name))
_run_workloads(number_of_workloads, scenario.sleep_duration,
scenario.reset_workloads_between_steps)
stop_timestamp = time()
# restore old toptier value
for workload_name in scenario.modify_toptier_limit.keys():
patch_toptier_limit(workload_name, get_base_toptier_limit(workload_name))
return Experiment(scenario.name, number_of_workloads, scenario.experiment_type,
EXPERIMENT_DESCRIPTION[scenario.experiment_type],
start_timestamp, stop_timestamp)
TOPTIER_ANNOTATION_KEY = 'toptierlimit.cri-resource-manager.intel.com/pod'
SPEC = 'spec'
TEMPLATE = 'template'
METADATA = 'metadata'
ANNOTATIONS = 'annotations'
def get_base_toptier_limit(workload_name):
"""Returns toptier limit as declared in original stateful set definition.
Patching stateful set's toptier limit value for pod template WILL NOT change this value"""
get_sts_cmd = 'kubectl get sts {} -o json'.format(workload_name)
statefulset_spec = subprocess.run([get_sts_cmd], stdout=subprocess.PIPE, shell=True)
json_output = json.loads(statefulset_spec.stdout.decode('utf-8'))
# this value should never change for the stateful set, that is why we use it
# to read base toptier limit for given stateful set
toptier_limit = json_output[METADATA][ANNOTATIONS][TOPTIER_ANNOTATION_KEY]
return toptier_limit
def patch_toptier_limit(workload_name, toptier_value):
patch = {SPEC:
{TEMPLATE:
{METADATA:
{ANNOTATIONS:
{TOPTIER_ANNOTATION_KEY: toptier_value}}}}}
json_patch = json.dumps(patch)
patch_cmd = 'kubectl patch statefulset {} -p \'{}\''.format(workload_name, json_patch)
default_shell_run(patch_cmd)
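# Hedged usage sketch (not part of the original script): `scenario` stands for
# one of the Scenario objects defined in scenarios.py; the output filename is
# illustrative only.
#
#   experiment = run_experiment(scenario, scenario.workloads_count)
#   experiment_to_json(experiment, 'experiment_{}.json'.format(experiment.name))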
| 43.206897
| 95
| 0.683958
|
b64b8634ac83b8fa723d7ccfd3c59b96193acf20
| 3,179
|
py
|
Python
|
koapy/backtrader/KrxHistoricalDailyPriceData.py
|
resoliwan/koapy
|
b0616f252bb3588695dfb37c7d9b8580a65649a3
|
[
"MIT"
] | 1
|
2021-09-25T22:33:01.000Z
|
2021-09-25T22:33:01.000Z
|
koapy/backtrader/KrxHistoricalDailyPriceData.py
|
resoliwan/koapy
|
b0616f252bb3588695dfb37c7d9b8580a65649a3
|
[
"MIT"
] | null | null | null |
koapy/backtrader/KrxHistoricalDailyPriceData.py
|
resoliwan/koapy
|
b0616f252bb3588695dfb37c7d9b8580a65649a3
|
[
"MIT"
] | 1
|
2021-11-12T15:33:29.000Z
|
2021-11-12T15:33:29.000Z
|
import pandas as pd
from backtrader import TimeFrame, date2num
from backtrader.feed import DataBase
from tqdm import tqdm
from koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader import (
KrxHistoricalDailyPriceDataForBacktestLoader,
)
class KrxHistoricalDailyPriceData(DataBase):
# pylint: disable=no-member
params = (
("loader", None),
("symbol", None),
("name", None),
("fromdate", None),
("todate", None),
("compression", 1),
("timeframe", TimeFrame.Days),
("calendar", None),
("lazy", False),
)
lines = (
"amount",
"marketcap",
"shares",
)
def __init__(self):
super().__init__()
assert self.p.loader
assert self.p.symbol
assert self.p.timeframe == TimeFrame.Days
assert self.p.compression == 1
self.p.name = self.p.name or self.p.symbol or ""
self._cursor = None
self._started_already = False
if not self.p.lazy:
self.start()
def _close_cursor(self):
if self._cursor is not None:
self._cursor.close()
self._cursor = None
def _initialize_cursor(self):
self._close_cursor()
self._cursor = self.p.loader.load_as_cursor(
self.p.symbol, start_time=self.p.fromdate, end_time=self.p.todate
)
def start(self):
if not self._started_already:
self._initialize_cursor()
self._started_already = True
def stop(self):
self._close_cursor()
self._started_already = False
def _load(self):
if self._cursor is None:
return False
try:
date, open_, high, low, close, volume, amount, marcap, shares = next(
self._cursor
)
except StopIteration:
return False
else:
dt = pd.Timestamp(date)
self.lines.datetime[0] = date2num(dt)
self.lines.open[0] = open_
self.lines.high[0] = high
self.lines.low[0] = low
self.lines.close[0] = close
self.lines.volume[0] = volume
self.lines.openinterest[0] = 0.0
self.lines.amount[0] = amount
self.lines.marketcap[0] = marcap
self.lines.shares[0] = shares
return True
@classmethod
def adddata_fromfile(
cls,
cerebro,
filename,
symbols=None,
fromdate=None,
todate=None,
progress_bar=True,
):
loader = KrxHistoricalDailyPriceDataForBacktestLoader(filename)
if symbols is None:
symbols = loader.get_symbols()
progress = tqdm(symbols, disable=not progress_bar)
for symbol in progress:
progress.set_description("Adding Symbol [%s]" % symbol)
data = cls(
loader=loader,
symbol=symbol,
fromdate=fromdate,
todate=todate,
name=symbol,
) # pylint: disable=unexpected-keyword-arg
cerebro.adddata(data, name=data.p.name)
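# Hedged usage sketch (not part of the original module): 'krx.db' and the
# symbol are placeholders; assumes backtrader is installed.
#
#   import backtrader as bt
#   cerebro = bt.Cerebro()
#   KrxHistoricalDailyPriceData.adddata_fromfile(
#       cerebro, "krx.db", symbols=["005930"], progress_bar=False)
#   cerebro.run()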
| 26.057377
| 81
| 0.559295
|
cab891add3d6c52028667ed2373c01232eba9e02
| 26
|
py
|
Python
|
terrascript/mysql/__init__.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/mysql/__init__.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/mysql/__init__.py
|
vfoucault/python-terrascript
|
fe82b3d7e79ffa72b7871538f999828be0a115d0
|
[
"BSD-2-Clause"
] | null | null | null |
"""2017-11-28 18:08:12"""
| 13
| 25
| 0.538462
|
4e091e643dce0adeee9bd309d1166a4658336e3d
| 6,334
|
py
|
Python
|
easysql.py
|
JBRonaldHandiwinata/easypysql
|
3aea396c415108504de872db30f6c4fca49fa4f0
|
[
"MIT"
] | null | null | null |
easysql.py
|
JBRonaldHandiwinata/easypysql
|
3aea396c415108504de872db30f6c4fca49fa4f0
|
[
"MIT"
] | null | null | null |
easysql.py
|
JBRonaldHandiwinata/easypysql
|
3aea396c415108504de872db30f6c4fca49fa4f0
|
[
"MIT"
] | null | null | null |
import MySQLdb
import time
from MySQLdb.cursors import DictCursor
from DBUtils.PooledDB import PooledDB
class EasySql(object):
__pool = None
DB_HOST = ""
DB_NAME = ""
DB_USER = ""
DB_PWD = ""
DB_PORT = 3306
DB_CHARSET = "utf8"
def __init__(self):
self._conn = self.__getConn()
self._cursor = self._conn.cursor()
@classmethod
def __getConn(cls):
__pool = cls.__pool
if cls.__pool is None:
success = False
count = 0
while not success:
try:
__pool = PooledDB(creator=MySQLdb, mincached=1, maxcached=100,
host=cls.DB_HOST, port=cls.DB_PORT, user=cls.DB_USER, passwd=cls.DB_PWD, db=cls.DB_NAME,
use_unicode=True, charset=cls.DB_CHARSET, cursorclass=DictCursor)
if __pool is not None:
success = True
except MySQLdb.OperationalError as e:
                    if e.args[0] in (2003, 2006, 2013):
print("DB-CONNECTION ERROR: ", str(e.args[0]) + "-" + str(e.args[1]))
else:
print("UNKNOWN DB ERROR: ", str(e.args[0]) + "-" + str(e.args[1]))
success = False
time.sleep(2)
if count > 100000:
raise
count += 1
return __pool.connection()
def fetch_rows(self, sql, values, many=0):
self._cursor.execute(sql, values)
retval = self._cursor.fetchone() if many == 0 else self._cursor.fetchall()
return retval
def insert_rows(self, tbl, colandval, on_duplicate_key_update=False, on_duplicate_key_update_condition=""):
ct = 0
setcol = ""
lv = ""
isMany = False
if isinstance(colandval, list) or isinstance(colandval, tuple):
isMany = True
listtuple = list()
for p in colandval:
cts = 0
vals = list()
for key, val in p.items():
if key == "to":
key = "`to`"
if key == "status":
key = "`status`"
if key == "type":
key = "`type`"
if key == "from":
key = "`from`"
vals.append(val)
if ct == 0:
sep = "" if cts == (len(p.items()) - 1) else ","
setcol += key + sep
lv += "%s" + sep
cts += 1
listtuple.append(tuple(vals))
ct += 1
elif isinstance(colandval, dict):
vals = list()
for key, val in colandval.items():
if key == "to":
key = "`to`"
if key == "status":
key = "`status`"
if key == "type":
key = "`type`"
if key == "from":
key = "`from`"
vals.append(val)
sep = "" if ct == (len(colandval.items()) - 1) else ","
setcol += key + sep
lv += "%s" + sep
ct += 1
listtuple = tuple(vals)
else:
raise Exception('SQL INSERT err: Wrong data type set in SECOND parameter')
sql = "INSERT into " + tbl + " " + "(" + setcol + ")" + " VALUES " + "(" + lv + ")"
if on_duplicate_key_update and on_duplicate_key_update_condition:
sql += " " + "ON DUPLICATE KEY UPDATE" + " " + on_duplicate_key_update_condition
if isMany:
self._cursor.executemany(sql, listtuple)
else:
self._cursor.execute(sql, listtuple)
return self._cursor.lastrowid
def update_rows(self, tbl, dictset, dictwhere):
setval = ""
lv = ""
if isinstance(dictset, dict):
ct = 0
vals = list()
for key, val in dictset.items():
vals.append(val)
sep = " " if ct == (len(dictset.items()) - 1) else ", "
setval += key + "=%s" + sep
ct += 1
listtuple_ds = tuple(vals)
else:
raise Exception('SQL UPDATE err: Wrong data type set in FIRST parameter')
if isinstance(dictwhere, dict):
ct = 0
vals = list()
for key, val in dictwhere.items():
vals.append(val)
sep = " " if ct == (len(dictwhere.items()) - 1) else " AND "
lv += key + "=%s" + sep
ct += 1
listtuple_dw = tuple(vals)
else:
raise Exception('SQL UPDATE err: Wrong data type set in SECOND parameter')
thistuple = listtuple_ds + listtuple_dw
sql = "UPDATE " + tbl + " SET " + setval + " WHERE " + lv
return self.__query(sql, thistuple)
def __query(self, sql, param=None):
if param is None:
count = self._cursor.execute(sql)
else:
count = self._cursor.execute(sql, param)
return count
def __getInsertId(self):
"""
        :keyword: Get the id generated by the last INSERT on this connection
        :return: the last insert id
"""
self._cursor.execute("SELECT @@IDENTITY AS id")
result = self._cursor.fetchall()
return result[0]['id']
def begin(self):
"""
:keyword: Open a transaction
:return: None
"""
try:
self._conn.autocommit(0)
except:
pass
def end(self, option='commit'):
"""
:keyword: Closing a transaction
:param option: commit or rollback
:return:
"""
if option == 'commit':
self._conn.commit()
else:
self._conn.rollback()
def dispose(self, isEnd=1):
"""
:keyword: Release connection pool resource
:param isEnd: 1 or 0
:return:
"""
if isEnd == 1:
self.end('commit')
else:
self.end('rollback')
def closing(self):
"""
Closing a transaction
:return:
"""
self._cursor.close()
self._conn.close()
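# Hedged usage sketch (not part of the original module): the connection
# settings, table and column names are placeholders.
#
#   EasySql.DB_HOST = "localhost"
#   EasySql.DB_NAME = "testdb"
#   EasySql.DB_USER = "user"
#   EasySql.DB_PWD = "secret"
#   db = EasySql()
#   db.insert_rows("users", {"name": "alice", "status": "active"})
#   row = db.fetch_rows("SELECT * FROM users WHERE name=%s", ("alice",))
#   db.dispose()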
| 32.649485
| 126
| 0.45832
|
ff04a75bd16b4647c3bdb09fdb0c8f9a125c8c6b
| 41,650
|
py
|
Python
|
scripts/arcrest/geometryservice/geometryservice.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | 5
|
2019-01-12T13:57:52.000Z
|
2021-05-04T01:24:53.000Z
|
scripts/arcrest/geometryservice/geometryservice.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | null | null | null |
scripts/arcrest/geometryservice/geometryservice.py
|
datastark/crime-analysis-toolbox
|
af45e4ba59284d78b1c7d3e208a05e5001d024dd
|
[
"Apache-2.0"
] | 1
|
2018-08-11T19:09:57.000Z
|
2018-08-11T19:09:57.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from .._abstract import abstract
from ..common.geometry import Point, Polyline, Polygon, MultiPoint, Envelope
import json
########################################################################
class GeometryService(abstract.BaseAGSServer):
"""
A geometry service contains utility methods that provide access to
sophisticated and frequently used geometric operations. An ArcGIS
Server web site can only expose one geometry service with the static
name "Geometry".
"""
_url = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_json_dict = None
_json_string = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler=None, proxy_url=None, proxy_port=None):
"""Constructor"""
self._url = url
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
self.__init()
#----------------------------------------------------------------------
def __init(self):
"""loads the json values"""
res = self._get(url=self._url,
param_dict={"f": "json"},
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json_dict = res
self._json_string = json.dumps(self._json_dict)
for k,v in self._json_dict.items():
setattr(self, k, v)
#----------------------------------------------------------------------
def __str__(self):
"""returns object as a string"""
if self._json_string is None:
self.__init()
return self._json_string
#----------------------------------------------------------------------
def __iter__(self):
"""returns the JSON response in key/value pairs"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.items():
yield [k,v]
#----------------------------------------------------------------------
def areasAndLengths(self,
polygons,
lengthUnit,
areaUnit,
calculationType,
):
"""
The areasAndLengths operation is performed on a geometry service
resource. This operation calculates areas and perimeter lengths
for each polygon specified in the input array.
Inputs:
polygons - The array of polygons whose areas and lengths are
to be computed.
lengthUnit - The length unit in which the perimeters of
polygons will be calculated. If calculationType
is planar, then lengthUnit can be any esriUnits
constant. If lengthUnit is not specified, the
units are derived from sr. If calculationType is
not planar, then lengthUnit must be a linear
esriUnits constant, such as esriSRUnit_Meter or
esriSRUnit_SurveyMile. If lengthUnit is not
specified, the units are meters. For a list of
valid units, see esriSRUnitType Constants and
esriSRUnit2Type Constant.
areaUnit - The area unit in which areas of polygons will be
calculated. If calculationType is planar, then
areaUnit can be any esriUnits constant. If
areaUnit is not specified, the units are derived
from sr. If calculationType is not planar, then
areaUnit must be a linear esriUnits constant such
as esriSRUnit_Meter or esriSRUnit_SurveyMile. If
areaUnit is not specified, then the units are
meters. For a list of valid units, see
esriSRUnitType Constants and esriSRUnit2Type
constant.
The list of valid esriAreaUnits constants include,
esriSquareInches | esriSquareFeet |
esriSquareYards | esriAcres | esriSquareMiles |
esriSquareMillimeters | esriSquareCentimeters |
esriSquareDecimeters | esriSquareMeters | esriAres
| esriHectares | esriSquareKilometers.
calculationType - The type defined for the area and length
calculation of the input geometries. The
type can be one of the following values:
planar - Planar measurements use 2D
Euclidean distance to calculate
                                   area and length. This should
only be used if the area or
length needs to be calculated in
the given spatial reference.
Otherwise, use preserveShape.
geodesic - Use this type if you want to
calculate an area or length using
only the vertices of the polygon
and define the lines between the
points as geodesic segments
independent of the actual shape
of the polygon. A geodesic
segment is the shortest path
between two points on an ellipsoid.
preserveShape - This type calculates the
area or length of the geometry on
the surface of the Earth
ellipsoid. The shape of the
geometry in its coordinate system
is preserved.
Output:
JSON as dictionary
"""
url = self._url + "/areasAndLengths"
params = {
"f" : "json",
"lengthUnit" : lengthUnit,
"areaUnit" : {"areaUnit" : areaUnit},
"calculationType" : calculationType
}
if isinstance(polygons, list) and len(polygons) > 0:
p = polygons[0]
if isinstance(p, Polygon):
params['sr'] = p.spatialReference['wkid']
params['polygons'] = [poly.asDictionary for poly in polygons]
del p
else:
return "No polygons provided, please submit a list of polygon geometries"
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
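    # Hypothetical usage sketch: assuming `geom_service` is an instance of this
    # class and `poly1`, `poly2` are Polygon objects with a spatialReference set,
    # a call might look like:
    #
    #   res = geom_service.areasAndLengths(
    #       polygons=[poly1, poly2],
    #       lengthUnit=9001,                  # e.g. esriSRUnit_Meter
    #       areaUnit="esriSquareMeters",      # an esriAreaUnits constant
    #       calculationType="preserveShape")
    #   # res is a JSON dict with parallel 'areas' and 'lengths' arrays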
#----------------------------------------------------------------------
def __geometryListToGeomTemplate(self, geometries):
"""
converts a list of common.Geometry objects to the geometry
template value
Input:
geometries - list of common.Geometry objects
Output:
Dictionary in geometry service template
"""
template = {"geometryType": None,
"geometries" : []}
if isinstance(geometries, list) and len(geometries) > 0:
for g in geometries:
if isinstance(g, Polyline):
template['geometryType'] = "esriGeometryPolyline"
elif isinstance(g, Polygon):
template['geometryType'] = "esriGeometryPolygon"
elif isinstance(g, Point):
template['geometryType'] = "esriGeometryPoint"
elif isinstance(g, MultiPoint):
template['geometryType'] = "esriGeometryMultipoint"
elif isinstance(g, Envelope):
template['geometryType'] = "esriGeometryEnvelope"
else:
raise AttributeError("Invalid geometry type")
template['geometries'].append(g.asDictionary)
del g
return template
return template
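    # For example, a list of two Point geometries yields a template of the form
    #   {"geometryType": "esriGeometryPoint",
    #    "geometries": [{"x": ..., "y": ...}, {"x": ..., "y": ...}]}
    # which is the structure the geometry service REST endpoints expect for
    # their geometries parameter.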
#----------------------------------------------------------------------
def __geometryToGeomTemplate(self, geometry):
"""
Converts a single geometry object to a geometry service geometry
template value.
Input:
geometry - ArcREST geometry object
Output:
python dictionary of geometry template
"""
template = {"geometryType": None,
"geometry" : None}
if isinstance(geometry, Polyline):
template['geometryType'] = "esriGeometryPolyline"
elif isinstance(geometry, Polygon):
template['geometryType'] = "esriGeometryPolygon"
elif isinstance(geometry, Point):
template['geometryType'] = "esriGeometryPoint"
elif isinstance(geometry, MultiPoint):
template['geometryType'] = "esriGeometryMultipoint"
elif isinstance(geometry, Envelope):
template['geometryType'] = "esriGeometryEnvelope"
else:
raise AttributeError("Invalid geometry type")
template['geometry'] = geometry.asDictionary
return template
#----------------------------------------------------------------------
def __geomToStringArray(self, geometries, returnType="str"):
""" function to convert the geomtries to strings """
listGeoms = []
for g in geometries:
if isinstance(g, Point):
listGeoms.append(g.asDictionary)
elif isinstance(g, Polygon):
listGeoms.append(g.asDictionary) #json.dumps(
elif isinstance(g, Polyline):
listGeoms.append({'paths' : g.asDictionary['paths']})
if returnType == "str":
return json.dumps(listGeoms)
elif returnType == "list":
return listGeoms
else:
return json.dumps(listGeoms)
#----------------------------------------------------------------------
def autoComplete(self,
polygons=[],
polylines=[],
sr=None
):
"""
The autoComplete operation simplifies the process of
constructing new polygons that are adjacent to other polygons.
It constructs polygons that fill in the gaps between existing
polygons and a set of polylines.
Inputs:
polygons - array of Polygon objects
polylines - list of Polyline objects
sr - spatial reference of the input geometries WKID
"""
url = self._url + "/autoComplete"
params = {"f":"json"}
if sr is not None:
params['sr'] = sr
params['polygons'] = self.__geomToStringArray(polygons)
params['polylines'] = self.__geomToStringArray(polylines)
return self._get(url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
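    # Hypothetical usage sketch: assuming `geom_service` is an instance of this
    # class, `gap_polygons` is a list of Polygon objects and `boundary_lines`
    # is a list of Polyline objects:
    #
    #   filled = geom_service.autoComplete(polygons=gap_polygons,
    #                                      polylines=boundary_lines,
    #                                      sr=4326)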
#----------------------------------------------------------------------
def buffer(self,
geometries,
inSR,
distances,
units,
outSR=None,
bufferSR=None,
unionResults=True,
geodesic=True
):
"""
The buffer operation is performed on a geometry service resource
The result of this operation is buffered polygons at the
specified distances for the input geometry array. Options are
available to union buffers and to use geodesic distance.
        Inputs:
           geometries - array of Point, Polyline, or Polygon objects to buffer
           inSR - WKID of the spatial reference of the input geometries
           distances - single distance or list of distances to buffer
           units - unit (esriSRUnit constant) of the buffer distances
           outSR - optional WKID of the output geometries
           bufferSR - optional WKID in which the buffers are computed
           unionResults - if True, the buffers are unioned into a single
                          geometry
           geodesic - if True, geodesic distance is used to buffer
        """
url = self._url + "/buffer"
params = {
"f" : "json",
"inSR" : inSR,
"geodesic" : geodesic,
"unionResults" : unionResults
}
if isinstance(geometries, list) and len(geometries) > 0:
g = geometries[0]
if isinstance(g, Polygon):
params['geometries'] = {"geometryType": "esriGeometryPolygon",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Point):
params['geometries'] = {"geometryType": "esriGeometryPoint",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Polyline):
params['geometries'] = {"geometryType": "esriGeometryPolyline",
"geometries" : self.__geomToStringArray(geometries, "list")}
else:
return None
if isinstance(distances, list):
distances = [str(d) for d in distances]
params['distances'] = ",".join(distances)
else:
params['distances'] = str(distances)
params['units'] = units
if bufferSR is not None:
params['bufferSR'] = bufferSR
if outSR is not None:
params['outSR'] = outSR
return self._get(url, param_dict=params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url)
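    # Hypothetical usage sketch: buffering two Point objects `pt1` and `pt2`
    # (assumed to be in WGS84) by 100 and 250 meters and unioning the result:
    #
    #   rings = geom_service.buffer(geometries=[pt1, pt2],
    #                               inSR=4326,
    #                               distances=[100, 250],
    #                               units=9001,        # e.g. esriSRUnit_Meter
    #                               unionResults=True,
    #                               geodesic=True)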
#----------------------------------------------------------------------
def convexHull(self,
geometries,
sr=None):
""""""
url = self._url + "/convexHull"
params = {
"f" : "json"
}
if isinstance(geometries, list) and len(geometries) > 0:
g = geometries[0]
if sr is not None:
params['sr'] = sr
else:
params['sr'] = g._wkid
if isinstance(g, Polygon):
params['geometries'] = {"geometryType": "esriGeometryPolygon",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Point):
params['geometries'] = {"geometryType": "esriGeometryPoint",
"geometries" : self.__geomToStringArray(geometries, "list")}
elif isinstance(g, Polyline):
params['geometries'] = {"geometryType": "esriGeometryPolyline",
"geometries" : self.__geomToStringArray(geometries, "list")}
else:
return None
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def cut(self,
cutter,
target,
sr=None):
""""""
url = self._url + "/cut"
params = {
"f" : "json"
}
if sr is not None:
params['sr'] = sr
if isinstance(cutter, Polyline):
params['cutter'] = cutter.asDictionary
else:
raise AttributeError("Input must be type Polyline")
if isinstance(target, list) and len(target) > 0:
geoms = []
template = {"geometryType": "",
"geometries" : []}
for g in target:
if isinstance(g, Polygon):
template['geometryType'] = "esriGeometryPolygon"
template['geometries'].append(g.asDictionary)
                elif isinstance(g, Polyline):
                    template['geometryType'] = "esriGeometryPolyline"
                    template['geometries'].append(g.asDictionary)
                else:
                    raise AttributeError("Invalid geometry in target, entries can only be Polygon or Polyline")
del g
params['target'] = template
else:
AttributeError("You must provide at least 1 Polygon/Polyline geometry in a list")
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def density(self,
geometries,
sr,
maxSegmentLength,
lengthUnit,
geodesic=False,
):
""""""
url = self._url + "/densify"
params = {
"f" : "json",
"sr" : sr,
"maxSegmentLength" : maxSegmentLength,
"lengthUnit" : lengthUnit,
"geodesic" : geodesic
}
if isinstance(geometries, list) and len(geometries) > 0:
template = {"geometryType": None,
"geometries" : []}
for g in geometries:
if isinstance(g, Polyline):
template['geometryType'] = "esriGeometryPolyline"
elif isinstance(g, Polygon):
template['geometryType'] = "esriGeometryPolygon"
else:
raise AttributeError("Invalid geometry type")
template['geometries'].append(g.asDictionary)
params['geometries'] = template
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port = self._proxy_port)
#----------------------------------------------------------------------
def difference(self,
geometries,
sr,
geometry
):
""""""
url = self._url + "/difference"
params = {
"f" : "json",
"sr" : sr
}
if isinstance(geometries, list) and len(geometries) > 0:
template = {"geometryType": None,
"geometries" : []}
for g in geometries:
if isinstance(g, Polyline):
template['geometryType'] = "esriGeometryPolyline"
elif isinstance(g, Polygon):
template['geometryType'] = "esriGeometryPolygon"
elif isinstance(g, Point):
template['geometryType'] = "esriGeometryPoint"
                elif isinstance(g, MultiPoint):
                    template['geometryType'] = "esriGeometryMultipoint"
else:
raise AttributeError("Invalid geometry type")
template['geometries'].append(g.asDictionary)
del g
params['geometries'] = template
geomTemplate = {"geometryType": None,
"geometries" : []
}
if isinstance(geometry, Polyline):
geomTemplate['geometryType'] = "esriGeometryPolyline"
elif isinstance(geometry, Polygon):
geomTemplate['geometryType'] = "esriGeometryPolygon"
elif isinstance(geometry, Point):
geomTemplate['geometryType'] = "esriGeometryPoint"
        elif isinstance(geometry, MultiPoint):
            geomTemplate['geometryType'] = "esriGeometryMultipoint"
else:
raise AttributeError("Invalid geometry type")
geomTemplate['geometry'] = geometry.asDictionary
params['geometry'] = geomTemplate
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def distance(self,
sr,
geometry1,
geometry2,
distanceUnit="",
geodesic=False
):
""""""
url = self._url + "/distance"
params = {
"f" : "json",
"sr" : sr,
"distanceUnit" : distanceUnit,
"geodesic" : geodesic
}
geometry1 = self.__geometryToGeomTemplate(geometry=geometry1)
geometry2 = self.__geometryToGeomTemplate(geometry=geometry2)
params['geometry1'] = geometry1
params['geometry2'] = geometry2
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def findTransformation(self, inSR, outSR, extentOfInterest=None, numOfResults=1):
"""
The findTransformations operation is performed on a geometry
service resource. This operation returns a list of applicable
geographic transformations you should use when projecting
geometries from the input spatial reference to the output spatial
reference. The transformations are in JSON format and are returned
in order of most applicable to least applicable. Recall that a
geographic transformation is not needed when the input and output
spatial references have the same underlying geographic coordinate
systems. In this case, findTransformations returns an empty list.
Every returned geographic transformation is a forward
transformation meaning that it can be used as-is to project from
the input spatial reference to the output spatial reference. In the
case where a predefined transformation needs to be applied in the
reverse direction, it is returned as a forward composite
transformation containing one transformation and a transformForward
element with a value of false.
Inputs:
inSR - The well-known ID (WKID) of the spatial reference or a
spatial reference JSON object for the input geometries
outSR - The well-known ID (WKID) of the spatial reference or a
spatial reference JSON object for the input geometries
extentOfInterest - The bounding box of the area of interest
specified as a JSON envelope. If provided, the extent of
interest is used to return the most applicable geographic
transformations for the area. If a spatial reference is not
included in the JSON envelope, the inSR is used for the
envelope.
numOfResults - The number of geographic transformations to
return. The default value is 1. If numOfResults has a value of
-1, all applicable transformations are returned.
"""
params = {
"f" : "json",
"inSR" : inSR,
"outSR" : outSR
}
url = self._url + "/findTransformations"
if isinstance(numOfResults, int):
params['numOfResults'] = numOfResults
if isinstance(extentOfInterest, Envelope):
params['extentOfInterest'] = extentOfInterest.asDictionary
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
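    # Hypothetical usage sketch: asking for the two most applicable geographic
    # transformations from WGS84 (WKID 4326) to NAD83 (WKID 4269):
    #
    #   transforms = geom_service.findTransformation(inSR=4326,
    #                                                outSR=4269,
    #                                                numOfResults=2)
    #   # returns a JSON dict whose 'transformations' list is ordered from
    #   # most to least applicable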
#----------------------------------------------------------------------
def fromGeoCoordinateString(self, sr, strings,
conversionType, conversionMode=None):
"""
The fromGeoCoordinateString operation is performed on a geometry
service resource. The operation converts an array of well-known
strings into xy-coordinates based on the conversion type and
spatial reference supplied by the user. An optional conversion mode
parameter is available for some conversion types.
Inputs:
sr - The well-known ID of the spatial reference or a spatial
reference json object.
strings - An array of strings formatted as specified by
conversionType.
Syntax: [<string1>,...,<stringN>]
Example: ["01N AA 66021 00000","11S NT 00000 62155",
"31U BT 94071 65288"]
conversionType - The conversion type of the input strings.
Valid conversion types are:
MGRS - Military Grid Reference System
USNG - United States National Grid
UTM - Universal Transverse Mercator
GeoRef - World Geographic Reference System
GARS - Global Area Reference System
DMS - Degree Minute Second
DDM - Degree Decimal Minute
DD - Decimal Degree
conversionMode - Conversion options for MGRS, UTM and GARS
conversion types.
Conversion options for MGRS and UTM conversion types.
Valid conversion modes for MGRS are:
mgrsDefault - Default. Uses the spheroid from the given spatial
reference.
mgrsNewStyle - Treats all spheroids as new, like WGS 1984. The
180 degree longitude falls into Zone 60.
mgrsOldStyle - Treats all spheroids as old, like Bessel 1841.
The 180 degree longitude falls into Zone 60.
mgrsNewWith180InZone01 - Same as mgrsNewStyle except the 180
degree longitude falls into Zone 01.
mgrsOldWith180InZone01 - Same as mgrsOldStyle except the 180
degree longitude falls into Zone 01.
Valid conversion modes for UTM are:
utmDefault - Default. No options.
utmNorthSouth - Uses north/south latitude indicators instead of
zone numbers. Non-standard. Default is recommended
"""
url = self._url + "/fromGeoCoordinateString"
params = {
"f" : "json",
"sr" : sr,
"strings" : strings,
"conversionType" : conversionType
}
if not conversionMode is None:
params['conversionMode'] = conversionMode
return self._post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
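    # Hypothetical usage sketch, reusing the MGRS strings from the docstring
    # above:
    #
    #   xy = geom_service.fromGeoCoordinateString(
    #       sr=4326,
    #       strings=["01N AA 66021 00000", "11S NT 00000 62155"],
    #       conversionType="MGRS",
    #       conversionMode="mgrsNewStyle")
    #   # returns a JSON dict whose 'coordinates' entry is [[x1, y1], [x2, y2]]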
#----------------------------------------------------------------------
def generalize(self,
sr,
geometries,
maxDeviation,
deviationUnit):
""""""
url = self._url + "/generalize"
params = {
"f" : "json",
"sr" : sr,
"deviationUnit" : deviationUnit,
"maxDeviation": maxDeviation
}
params['geometries'] = self.__geometryListToGeomTemplate(geometries=geometries)
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def intersect(self,
sr,
geometries,
geometry
):
""""""
url = self._url + "/intersect"
params = {
"f" : "json",
"sr" : sr,
"geometries" : self.__geometryListToGeomTemplate(geometries=geometries),
"geometry" : self.__geometryToGeomTemplate(geometry=geometry)
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def labelPoints(self,
sr,
polygons,
):
""""""
url = self._url + "/labelPoints"
params = {
"f" : "json",
"sr" : sr,
"polygons": self.__geomToStringArray(geometries=polygons,
returnType="list")
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def lengths(self,
sr,
polylines,
lengthUnit,
calculationType
):
""""""
allowedCalcTypes = ['planar', 'geodesic', 'preserveShape']
if calculationType not in allowedCalcTypes:
raise AttributeError("Invalid calculation Type")
url = self._url + "/lengths"
params = {
"f" : "json",
"sr" : sr,
"polylines": self.__geomToStringArray(geometries=polylines,
returnType="list"),
"lengthUnit" : lengthUnit,
"calculationType" : calculationType
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def offset(self,
geometries,
offsetDistance,
offsetUnit,
offsetHow="esriGeometryOffsetRounded",
bevelRatio=10,
simplifyResult=False,
sr=None,
):
""""""
allowedHow = ["esriGeometryOffsetRounded",
"esriGeometryOffsetBevelled",
"esriGeometryOffsetMitered"]
if offsetHow not in allowedHow:
raise AttributeError("Invalid Offset How value")
url = self._url + "/offset"
params = {
"f" : "json",
"sr" : sr,
"geometries": self.__geometryListToGeomTemplate(geometries=geometries),
"offsetDistance": offsetDistance,
"offsetUnit" : offsetUnit,
"offsetHow" : offsetHow,
"bevelRatio" : bevelRatio,
"simplifyResult" : simplifyResult
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def project(self,
geometries,
inSR,
outSR,
transformation="",
transformFoward=False):
""""""
url = self._url + "/project"
params = {
"f" : "json",
"inSR" : inSR,
"geometries": self.__geometryListToGeomTemplate(geometries=geometries),
"outSR" : outSR,
"transformation" : transformation,
"transformFoward": transformFoward
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def relation(self,
geometries1,
geometries2,
sr,
relation="esriGeometryRelationIntersection",
relationParam=""):
""""""
relationType = [
"esriGeometryRelationCross",
"esriGeometryRelationDisjoint",
"esriGeometryRelationIn",
"esriGeometryRelationInteriorIntersection",
"esriGeometryRelationIntersection",
"esriGeometryRelationLineCoincidence",
"esriGeometryRelationLineTouch",
"esriGeometryRelationOverlap",
"esriGeometryRelationPointTouch",
"esriGeometryRelationTouch",
"esriGeometryRelationWithin",
"esriGeometryRelationRelation"
]
if relation not in relationType:
raise AttributeError("Invalid relation type")
url = self._url + "/relation"
params = {
"f" : "json",
"sr" : sr,
"geometries1": self.__geometryListToGeomTemplate(geometries=geometries1),
"geometries2": self.__geometryListToGeomTemplate(geometries=geometries2),
"relation" : relation,
"relationParam" : relationParam
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def reshape(self,
sr,
target,
reshaper
):
"""calls the reshape command on a geometry service"""
url = self._url + "/reshape"
params = {
"f" : "json",
"sr" : sr,
"target" : self.__geometryToGeomTemplate(geometry=target)
}
if isinstance(reshaper, Polyline):
params["reshaper"] = reshaper.asDictionary
else:
raise AttributeError("Invalid reshaper object, must be Polyline")
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def simplify(self,
sr,
geometries
):
"""returns a simplied geometry object"""
url = self._url + "/simplify"
params = {
"f" : "json",
"sr" : sr,
"geometries" : self.__geometryListToGeomTemplate(geometries=geometries)
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def toGeoCoordinateString(self,
sr,
coordinates,
conversionType,
conversionMode="mgrsDefault",
numOfDigits=None,
rounding=True,
addSpaces=True
):
"""
The toGeoCoordinateString operation is performed on a geometry
service resource. The operation converts an array of
xy-coordinates into well-known strings based on the conversion type
and spatial reference supplied by the user. Optional parameters are
available for some conversion types. Note that if an optional
parameter is not applicable for a particular conversion type, but a
value is supplied for that parameter, the value will be ignored.
Inputs:
sr - The well-known ID of the spatial reference or a spatial
reference json object.
coordinates - An array of xy-coordinates in JSON format to be
converted. Syntax: [[x1,y2],...[xN,yN]]
conversionType - The conversion type of the input strings.
Allowed Values:
MGRS - Military Grid Reference System
USNG - United States National Grid
UTM - Universal Transverse Mercator
GeoRef - World Geographic Reference System
GARS - Global Area Reference System
DMS - Degree Minute Second
DDM - Degree Decimal Minute
DD - Decimal Degree
conversionMode - Conversion options for MGRS and UTM conversion
types.
Valid conversion modes for MGRS are:
mgrsDefault - Default. Uses the spheroid from the given spatial
reference.
mgrsNewStyle - Treats all spheroids as new, like WGS 1984. The
180 degree longitude falls into Zone 60.
mgrsOldStyle - Treats all spheroids as old, like Bessel 1841.
The 180 degree longitude falls into Zone 60.
mgrsNewWith180InZone01 - Same as mgrsNewStyle except the 180
degree longitude falls into Zone 01.
mgrsOldWith180InZone01 - Same as mgrsOldStyle except the 180
degree longitude falls into Zone 01.
Valid conversion modes for UTM are:
utmDefault - Default. No options.
utmNorthSouth - Uses north/south latitude indicators instead of
zone numbers. Non-standard. Default is recommended.
numOfDigits - The number of digits to output for each of the
numerical portions in the string. The default value for
numOfDigits varies depending on conversionType.
rounding - If true, then numeric portions of the string are
rounded to the nearest whole magnitude as specified by
numOfDigits. Otherwise, numeric portions of the string are
truncated. The rounding parameter applies only to conversion
types MGRS, USNG and GeoRef. The default value is true.
addSpaces - If true, then spaces are added between components of
the string. The addSpaces parameter applies only to conversion
types MGRS, USNG and UTM. The default value for MGRS is false,
while the default value for both USNG and UTM is true.
"""
params = {
"f": "json",
"sr" : sr,
"coordinates" : coordinates,
"conversionType": conversionType
}
url = self._url + "/toGeoCoordinateString"
if not conversionMode is None:
params['conversionMode'] = conversionMode
if isinstance(numOfDigits, int):
params['numOfDigits'] = numOfDigits
if isinstance(rounding, int):
params['rounding'] = rounding
if isinstance(addSpaces, bool):
params['addSpaces'] = addSpaces
return self._post(url=url,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler)
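    # Hypothetical usage sketch: converting two lon/lat pairs to MGRS strings
    # with 5-digit precision:
    #
    #   strings = geom_service.toGeoCoordinateString(
    #       sr=4326,
    #       coordinates=[[-117.196, 34.057], [-117.918, 33.812]],
    #       conversionType="MGRS",
    #       conversionMode="mgrsNewStyle",
    #       numOfDigits=5,
    #       addSpaces=True)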
#----------------------------------------------------------------------
def trimExtend(self,
sr,
polylines,
trimExtendTo,
extendHow=0):
""""""
allowedHow = [0,1,2,4,8,16]
if extendHow not in allowedHow:
raise AttributeError("Invalid extend How value.")
url = self._url + "/trimExtend"
params = {
"f" : "json",
"sr" : sr,
"polylines" : self.__geomToStringArray(geometries=polylines, returnType="list"),
"extendHow": extendHow,
"trimExtendTo" : trimExtendTo.asDictionary
}
return self._get(url=url, param_dict=params,
proxy_url=self._proxy_url,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def union(self,
sr,
geometries):
""""""
url = self._url + "/union"
params = {
"f" : "json",
"sr" : sr,
"geometries" : self.__geometryListToGeomTemplate(geometries=geometries)
}
return self._get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
| 45.419847
| 116
| 0.504898
|
0ccc46e4a10a396efd2992ebac874b3663cb06e6
| 3,088
|
py
|
Python
|
backend/pyrogram/raw/functions/account/update_profile.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/raw/functions/account/update_profile.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/raw/functions/account/update_profile.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class UpdateProfile(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``123``
- ID: ``0x78515775``
Parameters:
first_name (optional): ``str``
last_name (optional): ``str``
about (optional): ``str``
Returns:
:obj:`User <pyrogram.raw.base.User>`
"""
__slots__: List[str] = ["first_name", "last_name", "about"]
ID = 0x78515775
QUALNAME = "functions.account.UpdateProfile"
def __init__(self, *, first_name: Union[None, str] = None, last_name: Union[None, str] = None,
about: Union[None, str] = None) -> None:
self.first_name = first_name # flags.0?string
self.last_name = last_name # flags.1?string
self.about = about # flags.2?string
@staticmethod
def read(data: BytesIO, *args: Any) -> "UpdateProfile":
flags = Int.read(data)
first_name = String.read(data) if flags & (1 << 0) else None
last_name = String.read(data) if flags & (1 << 1) else None
about = String.read(data) if flags & (1 << 2) else None
return UpdateProfile(first_name=first_name, last_name=last_name, about=about)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.first_name is not None else 0
flags |= (1 << 1) if self.last_name is not None else 0
flags |= (1 << 2) if self.about is not None else 0
data.write(Int(flags))
if self.first_name is not None:
data.write(String(self.first_name))
if self.last_name is not None:
data.write(String(self.last_name))
if self.about is not None:
data.write(String(self.about))
return data.getvalue()
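# Illustrative sketch (hypothetical values): only the fields that are not None
# contribute a bit to the flags mask, so updating just the bio serializes
# flags = 1 << 2 followed by a single string:
#
#   call = UpdateProfile(about="New bio")
#   payload = call.write()                           # Int(ID) + Int(0b100) + String("New bio")
#   same = UpdateProfile.read(BytesIO(payload[4:]))  # read() expects the 4-byte ID already consumed
#
# In practice the object is sent through a pyrogram Client rather than parsed
# by hand.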
| 34.696629
| 103
| 0.617876
|
a76c7f351f105aec46aa0905f0fc28ed5694d28c
| 7,092
|
py
|
Python
|
transit/write_handlers.py
|
3wnbr1/transit-python2
|
fb44976bf41b8bcc6dd9050c16ef7c4a24c9f81c
|
[
"Apache-2.0"
] | 1
|
2022-02-21T09:34:00.000Z
|
2022-02-21T09:34:00.000Z
|
transit/write_handlers.py
|
3wnbr1/transit-python2
|
fb44976bf41b8bcc6dd9050c16ef7c4a24c9f81c
|
[
"Apache-2.0"
] | null | null | null |
transit/write_handlers.py
|
3wnbr1/transit-python2
|
fb44976bf41b8bcc6dd9050c16ef7c4a24c9f81c
|
[
"Apache-2.0"
] | null | null | null |
## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import uuid
import datetime
import struct
from transit.class_hash import ClassDict
from transit.transit_types import (
Keyword,
Symbol,
URI,
frozendict,
TaggedValue,
Link,
Boolean,
)
from decimal import Decimal
from dateutil import tz
from math import isnan
MAX_INT = pow(2, 63) - 1
MIN_INT = -pow(2, 63)
## This file contains Write Handlers - all the top-level objects used when
## writing Transit data. These objects must all be immutable and pickleable.
class TaggedMap(object):
def __init__(self, tag, rep, str):
self._tag = tag
self._rep = rep
self._str = str
def tag(self):
return self._tag
def rep(self):
return self._rep
def string_rep(self):
return self._str
class NoneHandler(object):
@staticmethod
def tag(_):
return "_"
@staticmethod
def rep(_):
return None
@staticmethod
def string_rep(n):
return None
class IntHandler(object):
@staticmethod
def tag(i):
return "i"
@staticmethod
def rep(i):
return i
@staticmethod
def string_rep(i):
return str(i)
class BigIntHandler(object):
@staticmethod
def tag(_):
return "n"
@staticmethod
def rep(n):
return str(n)
@staticmethod
def string_rep(n):
return str(n)
class Python3IntHandler(object):
@staticmethod
def tag(n):
if n < MAX_INT and n > MIN_INT:
return "i"
return "n"
@staticmethod
def rep(n):
return n
@staticmethod
def string_rep(n):
return str(n)
class BigDecimalHandler(object):
@staticmethod
def tag(_):
return "f"
@staticmethod
def rep(n):
return str(n)
@staticmethod
def string_rep(n):
return str(n)
class FloatHandler(object):
@staticmethod
def tag(f):
return "z" if isnan(f) or f in (float("Inf"), float("-Inf")) else "d"
@staticmethod
def rep(f):
if isnan(f):
return "NaN"
if f == float("Inf"):
return "INF"
if f == float("-Inf"):
return "-INF"
return f
@staticmethod
def string_rep(f):
return str(f)
class StringHandler(object):
@staticmethod
def tag(s):
return "s"
@staticmethod
def rep(s):
return s
@staticmethod
def string_rep(s):
return s
class BooleanHandler(object):
@staticmethod
def tag(_):
return "?"
@staticmethod
def rep(b):
return bool(b)
@staticmethod
def string_rep(b):
return "t" if b else "f"
class ArrayHandler(object):
@staticmethod
def tag(a):
return "array"
@staticmethod
def rep(a):
return a
@staticmethod
def string_rep(a):
return None
class MapHandler(object):
@staticmethod
def tag(m):
return "map"
@staticmethod
def rep(m):
return m
@staticmethod
def string_rep(m):
return None
class KeywordHandler(object):
@staticmethod
def tag(k):
return ":"
@staticmethod
def rep(k):
return str(k)
@staticmethod
def string_rep(k):
return str(k)
class SymbolHandler(object):
@staticmethod
def tag(s):
return "$"
@staticmethod
def rep(s):
return str(s)
@staticmethod
def string_rep(s):
return str(s)
class UuidHandler(object):
@staticmethod
def tag(_):
return "u"
@staticmethod
def rep(u):
return struct.unpack(">qq", u.bytes)
@staticmethod
def string_rep(u):
return str(u)
class UriHandler(object):
@staticmethod
def tag(_):
return "r"
@staticmethod
def rep(u):
return u.rep
@staticmethod
def string_rep(u):
return u.rep
class DateTimeHandler(object):
epoch = datetime.datetime(1970, 1, 1).replace(tzinfo=tz.tzutc())
@staticmethod
def tag(_):
return "m"
@staticmethod
def rep(d):
td = d - DateTimeHandler.epoch
return int((td.microseconds + (td.seconds + td.days * 24 * 3600) * pow(10, 6)) / 1e3)
@staticmethod
def verbose_handler():
return VerboseDateTimeHandler
@staticmethod
def string_rep(d):
return str(DateTimeHandler.rep(d))
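# Worked example: rep() yields integer milliseconds since the Unix epoch, so
#   DateTimeHandler.rep(datetime.datetime(1970, 1, 2, tzinfo=tz.tzutc()))
# evaluates to 86400000 (one day = 86400 seconds = 86,400,000 ms).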
class VerboseDateTimeHandler(object):
@staticmethod
def tag(_):
return "t"
@staticmethod
def rep(d):
return d.isoformat()
@staticmethod
def string_rep(d):
return d.isoformat()
class SetHandler(object):
@staticmethod
def tag(_):
return "set"
@staticmethod
def rep(s):
return TaggedMap("array", tuple(s), None)
@staticmethod
def string_rep(_):
return None
class TaggedValueHandler(object):
@staticmethod
def tag(tv):
return tv.tag
@staticmethod
def rep(tv):
return tv.rep
@staticmethod
def string_rep(_):
return None
class LinkHandler(object):
@staticmethod
def tag(_):
return "link"
@staticmethod
def rep(l):
return l.as_map
@staticmethod
def string_rep(_):
return None
class WriteHandler(ClassDict):
"""This is the master handler for encoding/writing Python data into
Transit data, based on its type.
The Handler itself is a dispatch map, that resolves on full type/object
inheritance.
These handlers can be overriden during the creation of a Transit Writer.
"""
def __init__(self):
super(WriteHandler, self).__init__()
self[type(None)] = NoneHandler
self[bool] = BooleanHandler
self[Boolean] = BooleanHandler
self[str] = StringHandler
self[list] = ArrayHandler
self[tuple] = ArrayHandler
self[dict] = MapHandler
self[int] = Python3IntHandler
self[float] = FloatHandler
self[Keyword] = KeywordHandler
self[Symbol] = SymbolHandler
self[uuid.UUID] = UuidHandler
self[URI] = UriHandler
self[datetime.datetime] = DateTimeHandler
self[set] = SetHandler
self[frozenset] = SetHandler
self[TaggedMap] = TaggedMap
self[dict] = MapHandler
self[frozendict] = MapHandler
self[TaggedValue] = TaggedValueHandler
self[Link] = LinkHandler
self[Decimal] = BigDecimalHandler
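# Sketch of registering a handler for a custom type (hypothetical Point2D class,
# not part of this module), mirroring the built-in registrations above:
#
#   class Point2D(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#
#   class Point2DHandler(object):
#       @staticmethod
#       def tag(_): return "point"
#       @staticmethod
#       def rep(p): return [p.x, p.y]
#       @staticmethod
#       def string_rep(_): return None
#
#   handlers = WriteHandler()
#   handlers[Point2D] = Point2DHandler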
| 19.219512
| 93
| 0.606035
|
c7901f3e108fde9969897d884e2f50430efcced8
| 11,028
|
py
|
Python
|
ocpp/messages.py
|
MrMika96/ocpp
|
bdcb28492c84a977b58069df9a9e78fadb095e9a
|
[
"MIT"
] | null | null | null |
ocpp/messages.py
|
MrMika96/ocpp
|
bdcb28492c84a977b58069df9a9e78fadb095e9a
|
[
"MIT"
] | null | null | null |
ocpp/messages.py
|
MrMika96/ocpp
|
bdcb28492c84a977b58069df9a9e78fadb095e9a
|
[
"MIT"
] | 1
|
2019-12-05T18:10:06.000Z
|
2019-12-05T18:10:06.000Z
|
""" Module containing classes that model the several OCPP messages types. It
also contain some helper functions for packing and unpacking messages. """
import os
import json
import decimal
from dataclasses import asdict, is_dataclass
from jsonschema import validate
from jsonschema.exceptions import ValidationError as SchemaValidationError
from ocpp.exceptions import (OCPPError, FormatViolationError,
PropertyConstraintViolationError,
ProtocolError, ValidationError,
UnknownCallErrorCodeError)
_schemas = {}
class MessageType:
""" Number identifying the different types of OCPP messages. """
#: Call identifies a request.
Call = 2
#: CallResult identifies a successful response.
CallResult = 3
#: CallError identifies an erroneous response.
CallError = 4
def unpack(msg):
"""
Unpacks a message into either a Call, CallError or CallResult.
"""
try:
msg = json.loads(msg)
except json.JSONDecodeError as e:
raise FormatViolationError(f'Message is not valid JSON: {e}')
if not isinstance(msg, list):
raise ProtocolError("OCPP message hasn't the correct format. It "
f"should be a list, but got {type(msg)} instead")
for cls in [Call, CallResult, CallError]:
try:
if msg[0] == cls.message_type_id:
return cls(*msg[1:])
except IndexError:
raise ProtocolError("Message doesn\'t contain MessageTypeID")
raise PropertyConstraintViolationError(f"MessageTypeId '{msg[0]}' isn't "
"valid")
def pack(msg):
"""
Returns the JSON representation of a Call, CallError or CallResult.
It just calls the 'to_json()' method of the message. But it is here mainly
to complement the 'unpack' function of this module.
"""
return msg.to_json()
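# Illustrative round trip (hypothetical values; see the BootNotification example
# in the Call docstring below):
#
#   call = Call(unique_id="19223201",
#               action="BootNotification",
#               payload={"chargePointVendor": "VendorX",
#                        "chargePointModel": "SingleSocketCharger"})
#   wire = pack(call)     # '[2, "19223201", "BootNotification", {...}]'
#   again = unpack(wire)  # a Call instance carrying the same fields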
def get_schema(message_type_id, action, ocpp_version, parse_float=float):
"""
    Read schema from disk and return it. Reads will be cached for performance
reasons.
The `parse_float` argument can be used to set the conversion method that
is used to parse floats. It must be a callable taking 1 argument. By
default it is `float()`, but certain schema's require `decimal.Decimal()`.
"""
if ocpp_version not in ["1.6", "2.0"]:
raise ValueError
schemas_dir = 'v' + ocpp_version.replace('.', '')
schema_name = action
if message_type_id == MessageType.CallResult:
schema_name += 'Response'
elif message_type_id == MessageType.Call:
if ocpp_version == "2.0":
schema_name += 'Request'
if ocpp_version == "2.0":
schema_name += '_v1p0'
dir, _ = os.path.split(os.path.realpath(__file__))
relative_path = f'{schemas_dir}/schemas/{schema_name}.json'
path = os.path.join(dir, relative_path)
if relative_path in _schemas:
return _schemas[relative_path]
# The JSON schemas for OCPP 2.0 start with a byte order mark (BOM)
# character. If no encoding is given, reading the schema would fail with:
#
# Unexpected UTF-8 BOM (decode using utf-8-sig):
with open(path, 'r', encoding='utf-8-sig') as f:
data = f.read()
_schemas[relative_path] = json.loads(data, parse_float=parse_float)
return _schemas[relative_path]
def validate_payload(message, ocpp_version):
""" Validate the payload of the message using JSON schemas. """
if type(message) not in [Call, CallResult]:
raise ValidationError("Payload can't be validated because message "
f"type. It's '{type(message)}', but it should "
"be either 'Call' or 'CallResult'.")
try:
# 3 OCPP 1.6 schedules have fields of type floats. The JSON schema
# defines a certain precision for these fields of 1 decimal. A value of
        # 21.4 is valid, whereas a value of 4.11 is not.
#
# The problem is that Python's internal representation of 21.4 might
# have more than 1 decimal. It might be 21.399999999999995. This would
# make the validation fail, although the payload is correct. This is a
# known issue with jsonschemas, see:
# https://github.com/Julian/jsonschema/issues/247
#
# This issue can be fixed by using a different parser for floats than
# the default one that is used.
#
# Both the schema and the payload must be parsed using the different
# parser for floats.
if ocpp_version == '1.6' and (
(type(message) == Call and
message.action in ['SetChargingProfile', 'RemoteStartTransaction']) # noqa
or
(type(message) == CallResult and
             message.action in ['GetCompositeSchedule'])
):
schema = get_schema(
message.message_type_id, message.action,
ocpp_version, parse_float=decimal.Decimal
)
message.payload = json.loads(
json.dumps(message.payload), parse_float=decimal.Decimal
)
else:
schema = get_schema(
message.message_type_id, message.action, ocpp_version
)
except (OSError, json.JSONDecodeError) as e:
raise ValidationError("Failed to load validation schema for action "
f"'{message.action}': {e}")
try:
validate(message.payload, schema)
except SchemaValidationError as e:
raise ValidationError(f"Payload '{message.payload} for action "
f"'{message.action}' is not valid: {e}")
class Call:
""" A Call is a type of message that initiate a request/response sequence.
Both central systems and charge points can send this message.
From the specification:
A Call always consists of 4 elements: The standard elements
MessageTypeId and UniqueId, a specific Action that is required on the
other side and a payload, the arguments to the Action. The syntax of a
call looks like this:
[<MessageTypeId>, "<UniqueId>", "<Action>", {<Payload>}]
...
For example, a BootNotification request could look like this:
[2,
"19223201",
"BootNotification",
{
"chargePointVendor": "VendorX",
"chargePointModel": "SingleSocketCharger"
}
]
"""
message_type_id = 2
def __init__(self, unique_id, action, payload):
self.unique_id = unique_id
self.action = action
self.payload = payload
if is_dataclass(payload):
self.payload = asdict(payload)
def to_json(self):
""" Return a valid JSON representation of the instance. """
return json.dumps([
self.message_type_id,
self.unique_id,
self.action,
self.payload,
])
def create_call_result(self, payload):
call_result = CallResult(self.unique_id, payload)
call_result.action = self.action
return call_result
def create_call_error(self, exception):
error_code = "InternalError"
error_description = "An unexpected error occurred."
error_details = {}
if isinstance(exception, OCPPError):
error_code = exception.code
error_description = exception.description
error_details = exception.details
return CallError(
self.unique_id,
error_code,
error_description,
error_details,
)
def __repr__(self):
return f"<Call - unique_id={self.unique_id}, action={self.action}, " \
f"payload={self.payload}>"
class CallResult:
"""
A CallResult is a message indicating that a Call has been handled
    successfully.
From the specification:
A CallResult always consists of 3 elements: The standard elements
MessageTypeId, UniqueId and a payload, containing the response to the
Action in the original Call. The syntax of a call looks like this:
[<MessageTypeId>, "<UniqueId>", {<Payload>}]
...
For example, a BootNotification response could look like this:
[3,
"19223201",
{
"status":"Accepted",
"currentTime":"2013-02-01T20:53:32.486Z",
"heartbeatInterval":300
}
]
"""
message_type_id = 3
def __init__(self, unique_id, payload, action=None):
self.unique_id = unique_id
self.payload = payload
# Strictly speaking no action is required in a CallResult. But in order
# to validate the message it is needed.
self.action = action
def to_json(self):
return json.dumps([
self.message_type_id,
self.unique_id,
self.payload,
])
def __repr__(self):
return f"<CallResult - unique_id={self.unique_id}, " \
f"action={self.action}, " \
f"payload={self.payload}>"
class CallError:
"""
A CallError is a response to a Call that indicates an error.
From the specification:
CallError always consists of 5 elements: The standard elements
MessageTypeId and UniqueId, an errorCode string, an errorDescription
string and an errorDetails object.
The syntax of a call looks like this:
[<MessageTypeId>, "<UniqueId>", "<errorCode>", "<errorDescription>", {<errorDetails>}] # noqa
"""
message_type_id = 4
def __init__(self, unique_id, error_code, error_description,
error_details=None):
self.unique_id = unique_id
self.error_code = error_code
self.error_description = error_description
self.error_details = error_details
def to_json(self):
return json.dumps([
self.message_type_id,
self.unique_id,
self.error_code,
self.error_description,
self.error_details,
])
def to_exception(self):
""" Return the exception that corresponds to the CallError. """
for error in OCPPError.__subclasses__():
if error.code == self.error_code:
return error(
description=self.error_description,
details=self.error_details
)
raise UnknownCallErrorCodeError("Error code '%s' is not defined by the"
" OCPP specification", self.error_code)
def __repr__(self):
return f"<CallError - unique_id={self.unique_id}, " \
f"error_code={self.error_code}, " \
f"error_description={self.error_description}, " \
f"error_details={self.error_details}>"
| 33.317221
| 105
| 0.607907
|
8acdc4f687e8ac998472c334443734723af4f83c
| 1,592
|
py
|
Python
|
plot.py
|
dvapan/cerhe
|
42a860907ea182b98b2243c0d66a4bc0f37d6c78
|
[
"MIT"
] | null | null | null |
plot.py
|
dvapan/cerhe
|
42a860907ea182b98b2243c0d66a4bc0f37d6c78
|
[
"MIT"
] | null | null | null |
plot.py
|
dvapan/cerhe
|
42a860907ea182b98b2243c0d66a4bc0f37d6c78
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sc
from cylp.cy import CyClpSimplex
from cylp.py.modeling.CyLPModel import CyLPArray
import matplotlib.pyplot as plt
from poly import mvmonos, powers
from constants import *
from gas_properties import TGZ, gas_coefficients
from air_properties import TBZ, air_coefficients
import ceramic_properties as cp
pc = np.loadtxt("poly_coeff")
cff_cnt = [10,20,10,20]
s,f = 0,cff_cnt[0]
tgh_cf = pc[s:f]
s,f = s+cff_cnt[0],f+cff_cnt[1]
tch_cf = pc[s:f]
s,f = s+cff_cnt[1],f+cff_cnt[2]
tgc_cf = pc[s:f]
s,f = s+cff_cnt[2],f+cff_cnt[3]
tcc_cf = pc[s:f]
X = sc.linspace(0, length, totalx*3)
T = sc.linspace(0, time, totalt*3)
R = sc.linspace(0.01*rball, rball, 10*3)
R = R[::-1]
#gas
tt,xx = np.meshgrid(T,X)
in_pts_cr = np.vstack([tt.flatten(),xx.flatten()]).T
pp = mvmonos(in_pts_cr,powers(3,2))
tt,xx = np.meshgrid(T,X)
u = pp.dot(tgh_cf)
uu = u.reshape((len(T), len(X)))
print(uu[0,:])
plt.plot(tt[0,:],uu[-1,:])
# ceramic
tt,xx,rr = np.meshgrid(T,X,R[0])
in_pts_cr = np.vstack([tt.flatten(),xx.flatten(),rr.flatten()]).T
pp = mvmonos(in_pts_cr,powers(3,3))
u = pp.dot(tch_cf)
uu = u.reshape((len(T), len(X)))
plt.plot(tt[0,:],uu[-1,:])
tt,xx,rr = np.meshgrid(T,X,R[-1])
in_pts_cr = np.vstack([tt.flatten(),xx.flatten(),rr.flatten()]).T
pp = mvmonos(in_pts_cr,powers(3,3))
u = pp.dot(tch_cf)
uu = u.reshape((len(T), len(X)))
plt.plot(tt[0,:],uu[-1,:])
# fig, ax = plt.subplots()
# p = ax.contourf(tt, xx, uu, np.linspace(700, 1900, 100), cmap='inferno')
# fig.colorbar(p, ax=ax)
# fig.tight_layout()
plt.xlim(0, 300)
plt.ylim(760, 800)
plt.show()
| 21.808219
| 74
| 0.666457
|
0934702c8d34ef7b4ad29923346fc91c44e9b4ed
| 27,038
|
py
|
Python
|
language/DeepNet/model/embed.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
language/DeepNet/model/embed.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
language/DeepNet/model/embed.py
|
huxin711/ColossalAI-Examples
|
fa3560683dec891315d5356e76c10ff20e41266f
|
[
"Apache-2.0"
] | null | null | null |
# Adapted from https://github.com/hpcaitech/ColossalAI-Examples/blob/main/language/gpt/model/embed.py
import torch
from torch import nn as nn, Tensor, distributed as dist
from torch.nn import functional as F
import torch.nn.init as init
from colossalai.context import seed, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device
from colossalai.nn.layer.base_layer import ParallelLayer
from torch.nn.parameter import Parameter
from colossalai.registry import LAYERS, LOSSES
from colossalai.nn.layer.utils import divide
from colossalai.nn.layer.parallel_1d._utils import gather_forward_split_backward, reduce_grad, reduce_input
from colossalai.nn.layer.parallel_1d.layers import Linear1D_Row
class VocabParallelEmbedding(torch.nn.Module):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(self,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
num_tokentypes=0,
dtype=torch.float):
super(VocabParallelEmbedding, self).__init__()
self.hidden_size = hidden_size
self.num_tokentypes = num_tokentypes
# Word embeddings (parallel).
self.word_embeddings = VocabParallelEmbedding1D(
vocab_size, self.hidden_size, dtype=dtype)
self._word_embeddings_key = 'word_embeddings'
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(
max_sequence_length, self.hidden_size, dtype=dtype)
self._position_embeddings_key = 'position_embeddings'
# Initialize the position embeddings.
# self.init_method(self.position_embeddings.weight)
# Token type embedding.
# Add this as an optional field that can be added through
# method call so we can load a pretrain model without
# token types and add them as needed.
self._tokentype_embeddings_key = 'tokentype_embeddings'
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes,
self.hidden_size, dtype=dtype)
# Initialize the token-type embeddings.
# self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
def zero_parameters(self):
"""Zero out all parameters in embedding."""
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.data.fill_(0)
self.tokentype_embeddings.weight.shared = True
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception('tokentype embeddings is already initialized')
if torch.distributed.get_rank() == 0:
print('adding embedding for {} tokentypes'.format(num_tokentypes),
flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes,
self.hidden_size)
# Initialize the token-type embeddings.
# self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids=None, tokentype_ids=None):
# Embeddings.
if input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
words_embeddings = self.word_embeddings(input_ids)
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if position_ids is None:
            position_ids = torch.arange(0, input_shape[-1], dtype=torch.long, device=get_current_device())
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
# Dropout.
with seed(ParallelMode.TENSOR):
embeddings = self.embedding_dropout(embeddings)
return embeddings
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._word_embeddings_key] \
= self.word_embeddings.state_dict(destination, prefix, keep_vars)
state_dict_[self._position_embeddings_key] \
= self.position_embeddings.state_dict(
destination, prefix, keep_vars)
if self.num_tokentypes > 0:
state_dict_[self._tokentype_embeddings_key] \
= self.tokentype_embeddings.state_dict(
destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Word embedding.
if self._word_embeddings_key in state_dict:
state_dict_ = state_dict[self._word_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'word_embeddings' in key:
state_dict_[key.split('word_embeddings.')[1]] \
= state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
# Position embedding.
if self._position_embeddings_key in state_dict:
state_dict_ = state_dict[self._position_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'position_embeddings' in key:
state_dict_[key.split('position_embeddings.')[1]] \
= state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
# Tokentype embedding.
if self.num_tokentypes > 0:
state_dict_ = {}
if self._tokentype_embeddings_key in state_dict:
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
# for backward compatibility.
for key in state_dict.keys():
if 'tokentype_embeddings' in key:
state_dict_[key.split('tokentype_embeddings.')[1]] \
= state_dict[key]
if len(state_dict_.keys()) > 0:
self.tokentype_embeddings.load_state_dict(state_dict_,
strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the '
'checkpoint but could not find it', flush=True)
class VocabParallelEmbedding1D(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(self, num_embeddings, embedding_dim, dtype=None,
init_method=None):
super(VocabParallelEmbedding1D, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
        # Set the defaults for compatibility.
self.padding_idx = None
self.max_norm = None
self.norm_type = 2.
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
self.tensor_model_parallel_size = gpc.tensor_parallel_size
        # Divide the weight matrix along the vocabulary dimension.
self.vocab_start_index, self.vocab_end_index = \
VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, gpc.get_local_rank(ParallelMode.PARALLEL_1D),
self.tensor_model_parallel_size)
self.num_embeddings_per_partition = self.vocab_end_index - \
self.vocab_start_index
# Allocate weights and initialize.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(
self.num_embeddings_per_partition, self.embedding_dim,
**factory_kwargs))
init.uniform_(self.weight, -1, 1)
def forward(self, input_):
if self.tensor_model_parallel_size > 1:
# Build the mask.
input_mask = (input_ < self.vocab_start_index) | \
(input_ >= self.vocab_end_index)
# Mask the input.
masked_input = input_.clone() - self.vocab_start_index
masked_input[input_mask] = 0
else:
masked_input = input_
# Get the embeddings.
output_parallel = F.embedding(masked_input, self.weight,
self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq,
self.sparse)
# Mask the output embedding.
if self.tensor_model_parallel_size > 1:
output_parallel[input_mask, :] = 0.0
# Reduce across all the model parallel GPUs.
        output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D)
return output
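# Illustration of the masking above: with a vocabulary of 8 split across 2
# tensor parallel ranks, rank 1 owns ids [4, 8). An input id of 5 maps to local
# row 1 of this rank's weight shard, while an input id of 2 is masked, looked up
# as row 0, and then zeroed before the all-reduce sums the partial embeddings
# contributed by all ranks.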
@LOSSES.register_module
class vocab_parallel_cross_entropy(nn.Module):
def __init__(self):
super().__init__()
def forward(self, vocab_parallel_logits, target):
"""Helper function for the cross entropy."""
vocab_parallel_logits = vocab_parallel_logits[..., :-1, :].contiguous()
target = target[..., 1:].contiguous()
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits.view(-1, vocab_parallel_logits.size(-1)), target.view(-1))
class _VocabParallelCrossEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits, target):
# Maximum value along vocab dimension across all GPUs.
logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
torch.distributed.all_reduce(logits_max,
op=torch.distributed.ReduceOp.MAX,
group=gpc.get_group(ParallelMode.PARALLEL_1D))
# Subtract the maximum value.
vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
        # Get the partition's vocab indices
get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
partition_vocab_size = vocab_parallel_logits.size()[-1]
rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
world_size = gpc.tensor_parallel_size
vocab_start_index, vocab_end_index = get_vocab_range(
partition_vocab_size, rank, world_size)
# Create a mask of valid vocab ids (1 means it needs to be masked).
target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
masked_target = target.clone() - vocab_start_index
masked_target[target_mask] = 0
# Get predicted-logits = logits[target].
# For Simplicity, we convert logits to a 2-D tensor with size
# [*, partition-vocab-size] and target to a 1-D tensor of size [*].
logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
masked_target_1d = masked_target.view(-1)
arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
device=logits_2d.device)
predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
predicted_logits_1d = predicted_logits_1d.clone().contiguous()
predicted_logits = predicted_logits_1d.view_as(target)
predicted_logits[target_mask] = 0.0
# All reduce is needed to get the chunks from other GPUs.
torch.distributed.all_reduce(predicted_logits,
op=torch.distributed.ReduceOp.SUM,
group=gpc.get_group(ParallelMode.PARALLEL_1D))
# Sum of exponential of logits along vocab dimension across all GPUs.
exp_logits = vocab_parallel_logits
torch.exp(vocab_parallel_logits, out=exp_logits)
sum_exp_logits = exp_logits.sum(dim=-1)
torch.distributed.all_reduce(sum_exp_logits,
op=torch.distributed.ReduceOp.SUM,
group=gpc.get_group(ParallelMode.PARALLEL_1D))
# Loss = log(sum(exp(logits))) - predicted-logit.
loss = torch.log(sum_exp_logits) - predicted_logits
loss = loss.mean()
# Store softmax, target-mask and masked-target for backward pass.
exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
return loss
@staticmethod
def backward(ctx, grad_output):
        # Retrieve tensors from the forward path.
softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
grad_input = softmax
# For simplicity, work with the 2D gradient.
partition_vocab_size = softmax.size()[-1]
grad_2d = grad_input.view(-1, partition_vocab_size)
# Add the gradient from matching classes.
arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
device=grad_2d.device)
grad_2d[arange_1d, masked_target_1d] -= (
1.0 - target_mask.view(-1).float())
# Finally elementwise multiplication with the output gradients.
grad_input.mul_(grad_output.unsqueeze(dim=-1))
return grad_input, None
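# --- Editor's sketch (not part of the original source) -----------------------
# Single-rank sanity check for the formula used in the forward pass above: with
# one tensor-parallel partition, loss = log(sum(exp(logits))) - logits[target]
# is ordinary cross entropy, so it should match F.cross_entropy. Assumes torch
# and torch.nn.functional (as F) are imported at the top of this module, as
# they are used elsewhere in this file.
def _single_rank_cross_entropy_check():
    torch.manual_seed(0)
    logits = torch.randn(8, 32)
    target = torch.randint(0, 32, (8,))
    manual = (torch.logsumexp(logits, dim=-1)
              - logits[torch.arange(8), target]).mean()
    assert torch.allclose(manual, F.cross_entropy(logits, target), atol=1e-6)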
class VocabUtility:
"""Split the vocabulary into `world_size` chunks amd return the
first and last index of the vocabulary belonging to the `rank`
partition: Note that indecies in [fist, last)"""
@staticmethod
def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size,
rank, world_size):
index_f = rank * per_partition_vocab_size
index_l = index_f + per_partition_vocab_size
return index_f, index_l
@staticmethod
def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
per_partition_vocab_size = divide(global_vocab_size, world_size)
return VocabUtility.vocab_range_from_per_partition_vocab_size(
per_partition_vocab_size, rank, world_size)
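# --- Editor's sketch (not part of the original source) -----------------------
# Illustrates how the helpers above partition a global vocabulary across
# tensor-parallel ranks; the sizes are illustrative only. Relies on the
# module-level `divide` helper already used by VocabUtility.
def _demo_vocab_partition(global_vocab_size=50304, world_size=4):
    for rank in range(world_size):
        first, last = VocabUtility.vocab_range_from_global_vocab_size(
            global_vocab_size, rank, world_size)
        # Each rank owns a contiguous, equally sized slice [first, last).
        print(rank, first, last)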
class VocabParallelLMHead1D(ParallelLayer):
"""
Language model head that shares the same parameters with the embedding matrix.
"""
def __init__(self,
embed=None,
vocab_size=None,
dtype=None,
embed_dim=None
):
super().__init__()
if embed is not None:
self.head = embed
else:
self.head = VocabParallelEmbedding1D(vocab_size, embed_dim, dtype=dtype)
def forward(self, x: Tensor) -> Tensor:
x = reduce_grad(x, ParallelMode.PARALLEL_1D)
x = F.linear(x, self.head.weight)
return x
###################################
class HiddenParallelEmbedding(torch.nn.Module):
"""Language model embeddings.
Arguments:
hidden_size: hidden size
vocab_size: vocabulary size
max_sequence_length: maximum size of sequence. This
is used for positional embedding
embedding_dropout_prob: dropout probability for embeddings
init_method: weight initialization method
num_tokentypes: size of the token-type embeddings. 0 value
will ignore this embedding
"""
def __init__(self,
hidden_size,
vocab_size,
max_sequence_length,
embedding_dropout_prob,
dtype=torch.float,
padding_idx: int = 0,
num_tokentypes=0,
):
super(HiddenParallelEmbedding, self).__init__()
self.hidden_size = hidden_size
self.num_tokentypes = num_tokentypes
# Word embeddings (parallel).
self.word_embeddings = HiddenParallelEmbedding1D(vocab_size, hidden_size, dtype, padding_idx)
self._word_embeddings_key = 'word_embeddings'
# Position embedding (serial).
self.position_embeddings = torch.nn.Embedding(
max_sequence_length, self.hidden_size)
self._position_embeddings_key = 'position_embeddings'
# Initialize the position embeddings.
# self.init_method(self.position_embeddings.weight)
# Token type embedding.
# Add this as an optional field that can be added through
# method call so we can load a pretrain model without
# token types and add them as needed.
self._tokentype_embeddings_key = 'tokentype_embeddings'
if self.num_tokentypes > 0:
self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes,
self.hidden_size)
# Initialize the token-type embeddings.
# self.init_method(self.tokentype_embeddings.weight)
else:
self.tokentype_embeddings = None
# Embeddings dropout
self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)
def zero_parameters(self):
"""Zero out all parameters in embedding."""
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
self.position_embeddings.weight.data.fill_(0)
self.position_embeddings.weight.shared = True
if self.num_tokentypes > 0:
self.tokentype_embeddings.weight.data.fill_(0)
self.tokentype_embeddings.weight.shared = True
def add_tokentype_embeddings(self, num_tokentypes):
"""Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.
"""
if self.tokentype_embeddings is not None:
raise Exception('tokentype embeddings is already initialized')
if torch.distributed.get_rank() == 0:
print('adding embedding for {} tokentypes'.format(num_tokentypes),
flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes,
self.hidden_size)
# Initialize the token-type embeddings.
# self.init_method(self.tokentype_embeddings.weight)
def forward(self, input_ids, position_ids=None, tokentype_ids=None):
if input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
words_embeddings = self.word_embeddings(input_ids)
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if position_ids is None:
            position_ids = torch.arange(0, input_shape[-1], dtype=torch.long, device=get_current_device())
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings
# Dropout.
with seed(ParallelMode.TENSOR):
embeddings = self.embedding_dropout(embeddings)
return embeddings
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""For easy load."""
state_dict_ = {}
state_dict_[self._word_embeddings_key] \
= self.word_embeddings.state_dict(destination, prefix, keep_vars)
state_dict_[self._position_embeddings_key] \
= self.position_embeddings.state_dict(
destination, prefix, keep_vars)
if self.num_tokentypes > 0:
state_dict_[self._tokentype_embeddings_key] \
= self.tokentype_embeddings.state_dict(
destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
# Word embedding.
if self._word_embeddings_key in state_dict:
state_dict_ = state_dict[self._word_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'word_embeddings' in key:
state_dict_[key.split('word_embeddings.')[1]] \
= state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
# Position embedding.
if self._position_embeddings_key in state_dict:
state_dict_ = state_dict[self._position_embeddings_key]
else:
# for backward compatibility.
state_dict_ = {}
for key in state_dict.keys():
if 'position_embeddings' in key:
state_dict_[key.split('position_embeddings.')[1]] \
= state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
# Tokentype embedding.
if self.num_tokentypes > 0:
state_dict_ = {}
if self._tokentype_embeddings_key in state_dict:
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
# for backward compatibility.
for key in state_dict.keys():
if 'tokentype_embeddings' in key:
state_dict_[key.split('tokentype_embeddings.')[1]] \
= state_dict[key]
if len(state_dict_.keys()) > 0:
self.tokentype_embeddings.load_state_dict(state_dict_,
strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the '
'checkpoint but could not find it', flush=True)
class HiddenParallelEmbedding1D(torch.nn.Module):
"""Embedding parallelized in the vocabulary dimension.
This is mainly adapted from torch.nn.Embedding and all the default
values are kept.
Arguments:
num_embeddings: vocabulary size.
embedding_dim: size of hidden state.
init_method: method to initialize weights.
"""
def __init__(self, num_embeddings, embedding_dim, dtype=torch.float, padding_idx: int = None,
init_method=None):
super(HiddenParallelEmbedding1D, self).__init__()
# Keep the input dimensions.
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
embed_dim_per_partition = divide(embedding_dim, gpc.tensor_parallel_size)
        # Set the defaults for compatibility.
self.padding_idx = padding_idx
self.max_norm = None
self.norm_type = 2.
self.scale_grad_by_freq = False
self.sparse = False
self._weight = None
# Allocate weights and initialize.
factory_kwargs = {'device': get_current_device(), 'dtype': dtype}
self.weight = Parameter(torch.empty(
num_embeddings, embed_dim_per_partition,
**factory_kwargs))
init.uniform_(self.weight, -1, 1)
def forward(self, input_):
# Get the embeddings.
output_parallel = F.embedding(input_, self.weight,
self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq,
self.sparse)
# Reduce across all the model parallel GPUs.
output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1)
return output
@LAYERS.register_module
class HiddenParallelLMHead1D(ParallelLayer):
"""
Language model head that shares the same parameters with the embedding matrix.
"""
def __init__(self,
embed=None,
embed_dim=None,
vocab_size=None,
dtype=None,
):
super().__init__()
if embed is not None:
self.head = embed
self.synced_embed = True
else:
# self.embedding = HiddenParallelEmbedding1D(vocab_size, hidden_size, dtype, padding_idx)
# (hidden_size/q, vocab_size)
self.synced_embed = False
self.head = Linear1D_Row(in_features=embed_dim,
out_features=vocab_size,
bias=False,
dtype=dtype,
parallel_input=False
)
def forward(self, x: Tensor) -> Tensor:
if self.synced_embed:
x = F.linear(x, self.head.weight)
else:
x = self.head(x)
return x
| 43.330128
| 129
| 0.609956
|
ae78222521795ad9e8769a7f4b794d5c26ea5981
| 470
|
py
|
Python
|
app/models.py
|
james-muriithi/django-api
|
26c9c96eafd35464366eaabf4811e0b96bb981f6
|
[
"MIT"
] | null | null | null |
app/models.py
|
james-muriithi/django-api
|
26c9c96eafd35464366eaabf4811e0b96bb981f6
|
[
"MIT"
] | null | null | null |
app/models.py
|
james-muriithi/django-api
|
26c9c96eafd35464366eaabf4811e0b96bb981f6
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class News(models.Model):
title = models.CharField(max_length=124)
description = models.TextField()
user = models.ForeignKey(
User, on_delete=models.CASCADE, related_name='posted_news')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'News'
def __str__(self):
return self.title
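# Editor's usage sketch (hypothetical, not part of the original app): creating a
# News entry requires a configured Django project and an existing User, e.g.
#   author = User.objects.first()
#   News.objects.create(title="Hello", description="First post", user=author)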
| 24.736842
| 67
| 0.712766
|
eeeb5e2ab24b242aa6feaa78f7ca0a8ad5b4fec2
| 11,176
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/waterfall/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/waterfall/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/waterfall/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "waterfall"
_path_str = "waterfall.textfont"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the font used for `text`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.waterfall.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
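# --- Editor's usage sketch (not part of the generated module) ----------------
# Textfont is normally supplied to a waterfall trace; this assumes a working
# plotly installation and is illustrative only.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Waterfall(
            y=[1, 2, -1],
            text=["a", "b", "c"],
            textfont=go.waterfall.Textfont(family="Arial", size=14, color="#444"),
        )
    )
    fig.show()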
| 33.76435
| 82
| 0.558608
|
e11db7d38894b157ab89c015c8cdb3098e5afce4
| 700
|
py
|
Python
|
tests/base.py
|
openprocurement/galleon
|
d27adaaf1e4c4daa3825dbcc6ce8ef0e0db1cfdc
|
[
"Apache-2.0"
] | null | null | null |
tests/base.py
|
openprocurement/galleon
|
d27adaaf1e4c4daa3825dbcc6ce8ef0e0db1cfdc
|
[
"Apache-2.0"
] | 1
|
2018-06-21T12:20:30.000Z
|
2018-06-21T12:20:30.000Z
|
tests/base.py
|
openprocurement/galleon
|
d27adaaf1e4c4daa3825dbcc6ce8ef0e0db1cfdc
|
[
"Apache-2.0"
] | 2
|
2018-05-07T15:47:12.000Z
|
2022-02-14T21:06:46.000Z
|
import os.path
import json
import yaml
from galleon import Mapper
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, 'data/tender.json')) as _in:
TENDER = json.load(_in)
with open(os.path.join(here, 'data/mapping.yaml')) as _in:
    MAPPING = yaml.load(_in, Loader=yaml.SafeLoader)
with open(os.path.join(here, 'data/schema.json')) as _in:
SCHEMA = json.load(_in)
with open(os.path.join(here, 'data/expected.json')) as _in:
RESULT = json.load(_in)
class BaseTest(object):
def test_simple(self, resolver, data, result):
mapper = Mapper(
self.__class__.TEST_MAPPING, resolver
)
parsed = mapper.apply(data)
assert parsed == result
| 24.137931
| 59
| 0.668571
|
a49d367efaa2c6492f9a4db3a190885027f3238d
| 97
|
py
|
Python
|
OA/Mubanspider/apps.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
OA/Mubanspider/apps.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
OA/Mubanspider/apps.py
|
CircularWorld/AutoOffice
|
052b95e094ed8790abb9bf22683006d8f3307ee4
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class MubanspiderConfig(AppConfig):
name = 'Mubanspider'
| 16.166667
| 35
| 0.773196
|
c3b3390d521715b4e5a73941afd9aab6112eb290
| 306
|
py
|
Python
|
Python/minimum-number-of-keypresses.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T17:50:56.000Z
|
2018-10-11T21:16:44.000Z
|
Python/minimum-number-of-keypresses.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | null | null | null |
Python/minimum-number-of-keypresses.py
|
Priyansh2/LeetCode-Solutions
|
d613da1881ec2416ccbe15f20b8000e36ddf1291
|
[
"MIT"
] | 4
|
2018-10-11T18:50:32.000Z
|
2018-10-12T00:04:09.000Z
|
# Time: O(n)
# Space: O(1)
import collections
# greedy, sort
class Solution(object):
def minimumKeypresses(self, s):
"""
:type s: str
:rtype: int
"""
        return sum(cnt*(i//9+1) for i, cnt in enumerate(sorted(collections.Counter(s).values(), reverse=True)))
| 20.4
| 115
| 0.584967
|
0b313151d321feaed85b5301791bc97a4ec8b0da
| 7,434
|
py
|
Python
|
code/branches/itmightbedave/python/oauth/example/server.py
|
learningcom/oauth.googlecode.com
|
f0d654b215cad1f4d9a3726b5f0bd62c49b566b9
|
[
"Apache-2.0"
] | 2
|
2017-12-24T02:06:05.000Z
|
2020-10-21T05:03:22.000Z
|
code/branches/itmightbedave/python/oauth/example/server.py
|
learningcom/oauth.googlecode.com
|
f0d654b215cad1f4d9a3726b5f0bd62c49b566b9
|
[
"Apache-2.0"
] | null | null | null |
code/branches/itmightbedave/python/oauth/example/server.py
|
learningcom/oauth.googlecode.com
|
f0d654b215cad1f4d9a3726b5f0bd62c49b566b9
|
[
"Apache-2.0"
] | 1
|
2020-10-21T05:03:25.000Z
|
2020-10-21T05:03:25.000Z
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib
import oauth
REQUEST_TOKEN_URL = 'http://pureshape.corp.yahoo.com/request_token'
ACCESS_TOKEN_URL = 'http://pureshape.corp.yahoo.com/access_token'
RENEW_ACCESS_TOKEN_URL = 'http://pureshape.corp.yahoo.com/renew_access_token'
AUTHORIZATION_URL = 'http://pureshape.corp.yahoo.com/authorize'
RESOURCE_URL = 'http://pureshape.corp.yahoo.com/photos'
REALM = 'http://pureshape.corp.yahoo.com/'
# example store for one of each thing
class MockOAuthDataStore(oauth.OAuthDataStore):
def __init__(self):
self.consumer = oauth.OAuthConsumer('key', 'secret')
self.request_token = oauth.OAuthToken('requestkey', 'requestsecret', 3600)
self.access_token = oauth.OAuthToken('accesskey', 'accesssecret', 3600, 'sessionhandle', 3600)
self.nonce = 'nonce'
def lookup_consumer(self, key):
if key == self.consumer.key:
return self.consumer
return None
def lookup_token(self, token_type, token):
token_attrib = getattr(self, '%s_token' % token_type)
if token == token_attrib.key:
return token_attrib
return None
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
if oauth_token and oauth_consumer.key == self.consumer.key and (oauth_token.key == self.request_token.key or oauth_token.key == self.access_token.key) and nonce == self.nonce:
return self.nonce
return None
def fetch_request_token(self, oauth_consumer):
if oauth_consumer.key == self.consumer.key:
return self.request_token
return None
def fetch_access_token(self, oauth_consumer, oauth_token):
if oauth_consumer.key == self.consumer.key and oauth_token.key == self.request_token.key:
# want to check here if token is authorized
# for mock store, we assume it is
return self.access_token
return None
def renew_access_token(self, oauth_consumer, oauth_token):
if oauth_consumer.key == self.consumer.key and oauth_token.key == self.access_token.key and oauth_token.session_handle == self.access_token.session_handle:
# Want to check here for token expiration
# Also should make sure the new token is returned with a fresh expiration
# for mock store, the old token is returned
return self.access_token
return None
def authorize_request_token(self, oauth_token, user):
if oauth_token.key == self.request_token.key:
# authorize the request token in the store
# for mock store, do nothing
return self.request_token
return None
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.oauth_server = oauth.OAuthServer(MockOAuthDataStore())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
# example way to send an oauth error
def send_oauth_error(self, err=None):
# send a 401 error
self.send_error(401, str(err.message))
# return the authenticate header
header = oauth.build_authenticate_header(realm=REALM)
for k, v in header.iteritems():
self.send_header(k, v)
def do_GET(self):
# debug info
#print self.command, self.path, self.headers
# get the post data (if any)
postdata = None
if self.command == 'POST':
try:
length = int(self.headers.getheader('content-length'))
postdata = self.rfile.read(length)
except:
pass
# construct the oauth request from the request parameters
oauth_request = oauth.OAuthRequest.from_request(self.command, self.path, headers=self.headers, query_string=postdata)
# request token
if self.path.startswith(REQUEST_TOKEN_URL):
try:
# create a request token
token = self.oauth_server.fetch_request_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# user authorization
if self.path.startswith(AUTHORIZATION_URL):
try:
# get the request token
token = self.oauth_server.fetch_request_token(oauth_request)
callback = self.oauth_server.get_callback(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the callback url (to show server has it)
self.wfile.write('callback: %s' % callback)
# authorize the token (kind of does nothing for now)
token = self.oauth_server.authorize_token(token, None)
self.wfile.write('\n')
# return the token key
token_key = urllib.urlencode({'oauth_token': token.key})
self.wfile.write('token key: %s' % token_key)
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# access token
if self.path.startswith(ACCESS_TOKEN_URL):
try:
# create an access token
token = self.oauth_server.fetch_access_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# renew token
if self.path.startswith(RENEW_ACCESS_TOKEN_URL):
try:
token = self.oauth_server.refresh_access_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# protected resources
if self.path.startswith(RESOURCE_URL):
try:
# verify the request has been oauth authorized
consumer, token, params = self.oauth_server.verify_request(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the extra parameters - just for something to return
self.wfile.write(str(params))
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
def do_POST(self):
return self.do_GET()
def main():
try:
server = HTTPServer(('', 8080), RequestHandler)
print 'Test server running...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main()
| 39.967742
| 183
| 0.615281
|
3fe747323a598b256282253d47629e2f962cb317
| 4,872
|
py
|
Python
|
docs/conf.py
|
dem4ply/chibi_command
|
49efc3070bdf40e5f27146379487345b1accd427
|
[
"WTFPL"
] | null | null | null |
docs/conf.py
|
dem4ply/chibi_command
|
49efc3070bdf40e5f27146379487345b1accd427
|
[
"WTFPL"
] | null | null | null |
docs/conf.py
|
dem4ply/chibi_command
|
49efc3070bdf40e5f27146379487345b1accd427
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# chibi_command documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import chibi_command
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'chibi_command'
copyright = u"2020, Dem4ply"
author = u"Dem4ply"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = chibi_command.__version__
# The full version, including alpha/beta/rc tags.
release = chibi_command.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'chibi_commanddoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'chibi_command.tex',
u'chibi_command Documentation',
u'Dem4ply', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chibi_command',
u'chibi_command Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'chibi_command',
u'chibi_command Documentation',
author,
'chibi_command',
'One line description of project.',
'Miscellaneous'),
]
| 29.707317
| 77
| 0.687808
|
c078bcf668c8518936bac20296eabddfe8196d9e
| 8,705
|
py
|
Python
|
intersight/model/network_element_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/network_element_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/network_element_list_all_of.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.network_element import NetworkElement
globals()['NetworkElement'] = NetworkElement
class NetworkElementListAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'count': (int,), # noqa: E501
'results': ([NetworkElement], none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NetworkElementListAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'network.Element' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional] # noqa: E501
results ([NetworkElement], none_type): The array of 'network.Element' resources matching the request.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 49.460227
| 1,678
| 0.638599
|
4c277dbcf4c421307f417bacfa275d71e3c51b3f
| 2,222
|
py
|
Python
|
loss.py
|
emanuelevivoli/CompReGAN
|
33589c3871bed8adcc157bf25a45b8d12ba1af66
|
[
"MIT"
] | null | null | null |
loss.py
|
emanuelevivoli/CompReGAN
|
33589c3871bed8adcc157bf25a45b8d12ba1af66
|
[
"MIT"
] | null | null | null |
loss.py
|
emanuelevivoli/CompReGAN
|
33589c3871bed8adcc157bf25a45b8d12ba1af66
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torchvision.models.vgg import vgg16
from utils.jpeg_layer import jpegLayer
class GeneratorLoss(nn.Module):
def __init__(self):
super(GeneratorLoss, self).__init__()
vgg = vgg16(pretrained=True)
loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
for param in loss_network.parameters():
param.requires_grad = False
self.loss_network = loss_network
self.mse_loss = nn.MSELoss()
self.tv_loss = TVLoss()
self.jpeg = jpegLayer
def forward(self, out_labels, out_images, target_images, quality_factor):
# Adversarial Loss (l^{SR}_{GEN})
# ? adversarial_loss = 1 - out_labels
# ??? adversarial_loss = torch.mean(1 - out_labels)
# adversarial_loss = - torch.log(out_labels)
adversarial_loss = torch.mean(1 - out_labels)
# Perception Loss (l^{SR}_{VGG})
perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images))
# Image Loss (l^{SR}_{MSE})
image_loss = self.mse_loss(out_images, target_images)
# TV Loss (l_{TV})
tv_loss = self.tv_loss(out_images)
# Jpeg Loss
jpeg_loss = self.mse_loss(self.jpeg(out_images, quality_factor), self.jpeg(target_images, quality_factor))
return jpeg_loss, image_loss, adversarial_loss, 0.006 * perception_loss, 2e-8 * tv_loss
class TVLoss(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLoss, self).__init__()
self.tv_loss_weight = tv_loss_weight
def forward(self, x):
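        # Editor's note: anisotropic total variation -- the sum of squared
        # differences between vertically (h_tv) and horizontally (w_tv)
        # adjacent pixels, each normalised by the number of such pixel pairs
        # and averaged over the batch.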
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
if __name__ == "__main__":
g_loss = GeneratorLoss()
print(g_loss)
| 36.42623
| 114
| 0.612061
|
01875bfbc4dfe636fce5c48188be2f2ceff9d3c5
| 1,296
|
py
|
Python
|
codes/data/torch_dataset.py
|
neonbjb/mmsr
|
2706a84f15613e9dcd48e2ba927e7779046cf681
|
[
"Apache-2.0"
] | 1
|
2020-06-27T13:18:55.000Z
|
2020-06-27T13:18:55.000Z
|
codes/data/torch_dataset.py
|
neonbjb/mmsr
|
2706a84f15613e9dcd48e2ba927e7779046cf681
|
[
"Apache-2.0"
] | null | null | null |
codes/data/torch_dataset.py
|
neonbjb/mmsr
|
2706a84f15613e9dcd48e2ba927e7779046cf681
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T
from torchvision import datasets
# Wrapper for basic pytorch datasets which re-wraps them into a format usable by ExtensibleTrainer.
class TorchDataset(Dataset):
def __init__(self, opt):
DATASET_MAP = {
"mnist": datasets.MNIST,
"fmnist": datasets.FashionMNIST,
"cifar10": datasets.CIFAR10,
}
transforms = []
if opt['flip']:
transforms.append(T.RandomHorizontalFlip())
if opt['crop_sz']:
transforms.append(T.RandomCrop(opt['crop_sz'], padding=opt['padding'], padding_mode="reflect"))
transforms.append(T.ToTensor())
transforms = T.Compose(transforms)
is_for_training = opt['test'] if 'test' in opt.keys() else True
self.dataset = DATASET_MAP[opt['dataset']](opt['datapath'], train=is_for_training, download=True, transform=transforms)
self.len = opt['fixed_len'] if 'fixed_len' in opt.keys() else len(self.dataset)
def __getitem__(self, item):
underlying_item = self.dataset[item][0]
return {'lq': underlying_item, 'hq': underlying_item,
'LQ_path': str(item), 'GT_path': str(item)}
def __len__(self):
return self.len
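# --- Editor's usage sketch (the options dict is hypothetical, not from the repo)
if __name__ == "__main__":
    opt = {'dataset': 'cifar10', 'datapath': '/tmp/cifar10',
           'flip': True, 'crop_sz': 32, 'padding': 4}
    ds = TorchDataset(opt)
    sample = ds[0]
    # Both 'lq' and 'hq' hold the same underlying image tensor.
    print(len(ds), sample['lq'].shape, sample['GT_path'])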
| 40.5
| 127
| 0.648148
|
d394e0cf28a4ad64599043a2a9863312fe69272e
| 3,468
|
py
|
Python
|
telegrambot/handlers.py
|
dcopm999/vacancy-telegrambot
|
a8a87a1eb166b3c6f50af6d4cd301e064e45d798
|
[
"MIT"
] | null | null | null |
telegrambot/handlers.py
|
dcopm999/vacancy-telegrambot
|
a8a87a1eb166b3c6f50af6d4cd301e064e45d798
|
[
"MIT"
] | 1
|
2020-10-08T11:32:10.000Z
|
2020-10-08T11:32:10.000Z
|
telegrambot/handlers.py
|
dcopm999/crm-telegrambot
|
a8a87a1eb166b3c6f50af6d4cd301e064e45d798
|
[
"MIT"
] | null | null | null |
import logging
from abc import ABC, abstractmethod
from typing import Any, Optional
from telegrambot.bot import TelegramBot
from telegrambot.mixins import HistoryMixin, CatalogMixin
logger = logging.getLogger(__name__)
class Handler(ABC):
"""
    The Handler interface declares a method for building the chain of handlers.
    It also declares a method for executing a request.
"""
@abstractmethod
def set_next(self, handler):
pass
@abstractmethod
def handle(self, request) -> Optional[str]:
pass
class AbstractHandler(Handler):
"""
    The default chaining behaviour can be implemented inside the base handler
    class.
"""
_next_handler: Handler = None
def __init__(self):
logger.debug('%s: __init__()', self.__class__)
self.bot = TelegramBot()
def set_next(self, handler: Handler) -> Handler:
logger.debug('%s: set_next()', self.__class__)
self._next_handler = handler
logger.debug('%s: self._next_handler=%s' % (self.__class__, handler))
        # Returning the handler from here lets handlers be chained in a simple
        # way, like this:
# monkey.set_next(squirrel).set_next(dog)
return handler
@abstractmethod
def handle(self, request: Any) -> str:
if self._next_handler:
return self._next_handler.handle(request)
return None
class HandlerCommandBase(AbstractHandler):
command: str
def handle(self, request: Any) -> str:
logger.debug('%s: handle()', self.__class__)
if request.get('callback_query', False) or request.get('message').get('text') == self.command or self.command == '__all__':
self.run(request)
super().handle(request)
def run(self, request):
return None
def get_result(self):
return None
class HandlerCommandStart(HistoryMixin, HandlerCommandBase):
command = '/start'
def run(self, request):
logger.debug('%s: run()', self.__class__)
if request.get('message'):
chat_id = request.get('message').get('chat').get('id')
keyboard = self.bot.get_kb_catalog()
self.bot.send_message(chat_id, 'Catalog', reply_markup=keyboard)
super(HandlerCommandStart, self).run(request)
def get_result(self):
return 'started'
class HandlerCommandHelp(HandlerCommandBase):
command = '/help'
def get_result(self):
return 'helped'
class HandlerProductList(CatalogMixin, HandlerCommandBase):
command = '__all__'
def run(self, request):
if request.get('message'):
chat_id = request.get('message').get('chat').get('id')
category = request.get('message').get('text')
products = self.get_product_by_categoty(category)
if products is None:
pass
else:
for product in products:
buy_btn = self.bot.get_kb_inline_buy(product)
self.bot.send_photo(chat_id, product.image.path, product.caption, buy_btn)
class HandlerProductOrder(CatalogMixin, HandlerCommandBase):
command = '__all__'
def run(self, request):
if request.get('callback_query'):
chat_id = request.get('callback_query').get('message').get('chat').get('id')
product_id = request.get('callback_query').get('data')
self.set_product_order(chat_id=chat_id, product_id=product_id)
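# Editor's wiring sketch (hypothetical, not part of the original module): build
# the chain and feed it a parsed Telegram update dict, e.g.
#   start = HandlerCommandStart()
#   start.set_next(HandlerCommandHelp()).set_next(HandlerProductList()).set_next(HandlerProductOrder())
#   start.handle(update)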
| 30.421053
| 131
| 0.647924
|
4c02be6834a4436fb3b932e8291fdd61428c3111
| 2,946
|
py
|
Python
|
itertools_2022-02-24.py
|
cartersimon/learnpython1
|
16ef511b3917e144c162861808af7bdcbcaebdc6
|
[
"MIT"
] | null | null | null |
itertools_2022-02-24.py
|
cartersimon/learnpython1
|
16ef511b3917e144c162861808af7bdcbcaebdc6
|
[
"MIT"
] | null | null | null |
itertools_2022-02-24.py
|
cartersimon/learnpython1
|
16ef511b3917e144c162861808af7bdcbcaebdc6
|
[
"MIT"
] | null | null | null |
# https://www.youtube.com/watch?v=HGOBQPFzWKo&list=RDCMUC8butISFwT-Wl7EV0hUK0BQ&start_radio=1&t=181s
# Intermediate Python Programming Course (freecodecamp.org)
# Tools for handling iterators (iterators can be used in a for loop- lists, etc)
# itertools- product, permutations, combinations, accumulate, groupby, infinite iterators
from itertools import permutations
a = [1, 2, 3]
perm = permutations(a) # create all permutations of the elements
perm = permutations(a, 2) # permutations with combinations of 2 elements
print(list(perm))
from itertools import combinations, combinations_with_replacement
a = [1, 2, 3, 4]
comb = combinations(a, 2) # returns unique combinations, length arg required
print(list(comb))
comb_wr = combinations_with_replacement(a, 2) # as combinations but can duplicate elements
from itertools import accumulate
# creates an iterator that returns accumulated sums, or other binary functions
a = [1, 2, 3, 4]
acc = accumulate(a)
# Output... [1, 3, 6, 10]
# by default adds values but can apply function
import operator
acc2 = accumulate(a, func=operator.mul)  # multiplies values
a = [1, 2, 5, 3, 4]
acc2 = accumulate(a, func=max)  # get maximum value
# output... [1, 2, 5, 5, 5]
from itertools import groupby
# creates an iterator that returns keys and groups from an iterable
def smaller_than_3(x):
return x < 3
a = [1, 2, 3, 4]
grp_obj = groupby(a, key=smaller_than_3)
# creates 2 groups (lists), values that match the key, and those that don't
for key, value in grp_obj:
print(key,list(value))
# Output...
# True [1, 2]
# False [3, 4]
# could also use a lambda function for the key...
grp_obj2 = groupby(a, key=lambda x: x < 3)
for key, value in grp_obj2:
print(key,list(value))
people = [{'name': 'Tim', 'age': 25}, {'name': 'Dan', 'age': 25},
{'name': 'Lisa', 'age': 27}, {'name': 'Claire', 'age': 28}]
grp_obj3 = groupby(people, key=lambda x: x['age'])
for key, value in grp_obj3:
print(key,list(value))
# Output...
# 25 [{'name': 'Tim', 'age': 25}, {'name': 'Dan', 'age': 25}]
# 27 [{'name': 'Lisa', 'age': 27}]
# 28 [{'name': 'Claire', 'age': 28}]
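# Editor's note: groupby only groups *consecutive* equal keys, so unsorted data
# should be sorted by the same key first, e.g....
people2 = [{'name': 'Tim', 'age': 25}, {'name': 'Lisa', 'age': 27},
           {'name': 'Dan', 'age': 25}]
for key, value in groupby(sorted(people2, key=lambda x: x['age']),
                          key=lambda x: x['age']):
    print(key, list(value))
# Output...
# 25 [{'name': 'Tim', 'age': 25}, {'name': 'Dan', 'age': 25}]
# 27 [{'name': 'Lisa', 'age': 27}]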
# some infinite iterators...
from itertools import count, cycle, repeat
for i in count(10): # creates a count from 10 to infinity
print(i)
if i == 15: # break at 15
break
b = [1, 2, 3]
for i in cycle(b):
    print(i)  # cycles through all values in the list infinitely
# Output...
# 1
# 2
# 3
# 1
# 2
# etc...
for i in repeat(1): # outputs 1 infinitely
print(i)
for i in repeat(1, 4): # outputs 1 four times
print(i)
| 35.493976
| 119
| 0.588934
|
52b7274aa384b84023dc45e47915c5117e677937
| 2,004
|
py
|
Python
|
01_ode_example_pendulum.py
|
benmaier/phase_space_example
|
70807ce73a749db4b64edd880bc9bf46f5a97383
|
[
"MIT"
] | 3
|
2018-11-19T12:56:15.000Z
|
2019-10-16T12:21:09.000Z
|
01_ode_example_pendulum.py
|
benmaier/phase_space_example
|
70807ce73a749db4b64edd880bc9bf46f5a97383
|
[
"MIT"
] | null | null | null |
01_ode_example_pendulum.py
|
benmaier/phase_space_example
|
70807ce73a749db4b64edd880bc9bf46f5a97383
|
[
"MIT"
] | 1
|
2018-11-19T14:02:02.000Z
|
2018-11-19T14:02:02.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# import for fast numerics
import numpy as np
# import the routine for ODE integration
from scipy.integrate import ode
# import the plotting routines
import matplotlib.pyplot as pl
# set equation of motion for pendulum
def dy_over_dt(t,y,g,l):
r"""pendulum:
d^2 x / dt^2 = - g/l * sin(x)
where x is the angle of the pendulum
in phase space:
dx/dt = v
dv/dt = (-g/l) sin(x)
as vector equation
d / x \ / 0 1 \ /sin(x)\ dy
--- | | = | | | | = --
d t \ v / \ -g/l 0 / \ v / dt
"""
result = np.zeros_like(y)
result[0] = y[1]
result[1] = -g/l * np.sin(y[0])
return result
# set parameters of pendulum
g = 1 # gravitational constant
l = 1 # pendulum length
# initial values
x_0 = 0   # initial angular position
v_0 = 1.0 # initial angular momentum
t_0 = 0 # initial time
# initial y-vector from initial position and momentum
y0 = np.array([x_0,v_0])
# initialize integrator
r = ode(dy_over_dt)
# Runge-Kutta with step size control
r.set_integrator('dopri5')
# set initial values
r.set_initial_value(y0,t_0)
# set g, l to pass to dx/dt
r.set_f_params(g,l)
# max value of time and points in time to integrate to
t_max = 10
N_spacing_in_t = 1000
# create vector of time points you want to evaluate
t = np.linspace(t_0,t_max,N_spacing_in_t)
# create vector of positions for those times
y_result = np.zeros((len(t), 2))
# loop through all demanded time points
for it, t_ in enumerate(t):
# get result of ODE integration up to the demanded time
y = r.integrate(t_)
# write result to result vector
y_result[it,:] = y
# get angle and angular momentum
angle = y_result[:,0]
angular_momentum = y_result[:,1]
# plot result
pl.plot(angle, angular_momentum,'-',lw=1)
pl.xlabel('angle $x$')
pl.ylabel('angular momentum $v$')
pl.gcf().savefig('pendulum_single_run.png',dpi=300)
pl.show()
| 22.021978
| 59
| 0.633733
|
7eb2dbadf8dfa65a84b458ee447c2841ea4512b1
| 72,373
|
py
|
Python
|
jumeg/epocher/jumeg_epocher_events.py
|
fboers/jumeg
|
e04896989faf72f4dbe7adf136e4d158d212f24a
|
[
"BSD-3-Clause"
] | null | null | null |
jumeg/epocher/jumeg_epocher_events.py
|
fboers/jumeg
|
e04896989faf72f4dbe7adf136e4d158d212f24a
|
[
"BSD-3-Clause"
] | null | null | null |
jumeg/epocher/jumeg_epocher_events.py
|
fboers/jumeg
|
e04896989faf72f4dbe7adf136e4d158d212f24a
|
[
"BSD-3-Clause"
] | null | null | null |
'''Class JuMEG_Epocher_Events
Class to extract event/epoch information and save to hdf5
extract mne-events per condition, save to HDF5 file
----------------------------------------------------------------
Author:
--------
Frank Boers <f.boers@fz-juelich.de>
Updates:
----------------------------------------------------------------
update: 19.06.2018
completely new; adds support for IOD and eye-tracking events
----------------------------------------------------------------
Example:
--------
#--- example via obj:
from jumeg.epocher.jumeg_epocher import jumeg_epocher
from jumeg.epocher.jumeg_epocher_epochs import JuMEG_Epocher_Epochs
from jumeg.jumeg_base import jumeg_base as jb
#--
jumeg_epocher.template_path ='.'
jumeg_epocher.verbose = verbose
#---
jumeg_epocher_epochs = JuMEG_Epocher_Epochs()
#---
fname = test.fif
raw = None
condition_list = ["Cond1","Condi2"]
#--- events: finding events, store into pandas dataframe and save as hdf5
#--- parameter for apply_events_to_hdf
evt_param = { "condition_list":condition_list,
"template_path": template_path,
"template_name": template_name,
"verbose" : verbose
}
(_,raw,epocher_hdf_fname) = jumeg_epocher.apply_events(fname,raw=raw,**evt_param)
#--- epochs
ep_param={
"condition_list": condition_list,
"template_path" : template_path,
"template_name" : template_name,
"verbose" : verbose,
"parameter":{
"event_extention": ".eve",
"save_condition":{"events":True,"epochs":True,"evoked":True}
}}
#---
print "---> EPOCHER Epochs"
print " -> File : "+ fname
print " -> Epocher Template: "+ template_name+"\n"
jumeg_epocher.apply_epochs(fname=fname,raw=raw,**ep_param)
'''
import sys,logging
import numpy as np
import pandas as pd
import mne
from copy import deepcopy
from jumeg.base.jumeg_base import jumeg_base,JuMEG_Base_Basic
from jumeg.epocher.jumeg_epocher_hdf import JuMEG_Epocher_HDF
logger = logging.getLogger('jumeg')
__version__="2020.01.07.001"
pd.set_option('display.precision', 3)
class JuMEG_Epocher_Channel_Baseline(object):
"""
base class for baseline dict definitions & properties
"baseline" :{"method":"avg","type_input":"iod_onset","baseline": [null,0]}
ToDO: use __slots__?
"""
def __init__(self,parameter=None,label="baseline"):
super(JuMEG_Epocher_Channel_Baseline,self).__init__()
self._param = parameter
self._label = label
#---
def _get_param(self,key=None):
try:
if key in self._param[self._label]:
return self._param[self._label][key]
except:
pass
return None
#---
def _set_param(self,key=None,val=None):
self._param[self._label][key] = val
#---baseline type
@property
def method(self): return self._get_param("method")
@method.setter
def method(self,v): self._set_param("method",v)
#---baseline type
@property
def type_input(self): return self._get_param("type_input")
@type_input.setter
    def type_input(self,v): self._set_param("type_input",v)
#---baseline
@property
def baseline(self): return self._get_param("baseline")
@baseline.setter
def baseline(self,v): self._set_param("baseline",v)
#---baseline
@property
def onset(self):
if type( self._get_param("baseline") ) is list: return self.baseline[0]
#---baseline
@property
def offset(self):
if type( self._get_param("baseline") ) is list: return self.baseline[1]
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
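#--- illustrative sketch (assumed template snippet, not executed here):
#--- with a condition parameter such as
#---    param = {"baseline": {"method": "avg", "type_input": "iod_onset", "baseline": [None, 0]}}
#--- the wrapper exposes the dict entries as properties:
#---    bl = JuMEG_Epocher_Channel_Baseline(parameter=param)
#---    bl.method   # -> "avg"
#---    bl.onset    # -> None  (baseline[0])
#---    bl.offset   # -> 0     (baseline[1])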
class JuMEG_Epocher_Basic(JuMEG_Base_Basic):
"""
base class for definitions & properties
"""
def __init__(self):
super(JuMEG_Epocher_Basic,self).__init__()
self._rt_type_list = ['MISSED', 'TOEARLY', 'WRONG', 'HIT']
self._data_frame_stimulus_cols = ['id','onset','offset']
self._data_frame_response_cols = ['rt_type','rt','rt_id','rt_onset','rt_offset','rt_index','rt_counts','bads','selected','weighted_selected']
self._stat_postfix = '-epocher-stats.csv'
self._idx_bad = -1
#---
@property
def idx_bad(self): return self._idx_bad
#---
@property
def data_frame_stimulus_cols(self): return self._data_frame_stimulus_cols
@data_frame_stimulus_cols.setter
def data_frame_stimulus_cols(self,v): self._data_frame_stimulus_cols = v
#---
@property
def data_frame_response_cols (self): return self._data_frame_response_cols
@data_frame_response_cols.setter
def data_frame_response_cols(self,v): self._data_frame_response_cols = v
#--- rt_type list: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
@property
def rt_type_list(self): return self._rt_type_list
#--- rt type index: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
def rt_type_as_index(self,s):
return self._rt_type_list.index( s.upper() )
@property
def idx_missed(self): return self._rt_type_list.index( 'MISSED')
@property
def idx_toearly(self): return self._rt_type_list.index( 'TOEARLY')
@property
def idx_wrong(self): return self._rt_type_list.index( 'WRONG')
@property
def idx_hit(self): return self._rt_type_list.index( 'HIT')
#---
@property
def data_frame_stimulus_cols(self): return self._data_frame_stimulus_cols
@data_frame_stimulus_cols.setter
def data_frame_stimulus_cols(self,v): self._data_frame_stimulus_cols = v
#---
@property
def data_frame_response_cols(self): return self._data_frame_response_cols
@data_frame_response_cols.setter
def data_frame_response_cols(self,v): self._data_frame_response_cols = v
#--- events stat file (output as csv)
@property
def stat_postfix(self): return self._stat_postfix
@stat_postfix.setter
def stat_postfix(self, v): self._stat_postfix = v
class JuMEG_Epocher_Events_Channel_BaseBase(object):
""" base class to handel epocher template channel parameter
Parameter:
----------
label : first-level key in dictionary <None>
parameter: epocher template parameter as dictionary <None>
Example:
--------
iod_parameter= {"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}
}
response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=iod_parameter)
print(response.channel)
>> IOD
"""
def __init__(self,label=None,parameter=None):
self.label = label
self._param = parameter
#---
def get_channel_parameter(self,key=None,prefix=None):
try:
if prefix:
k = prefix + '_' + key
return self._param[self.label][k]
else:
return self._param[self.label][key]
except:
pass
return None
#return self._param
#---
def set_channel_parameter(self,key=None,val=None,prefix=None):
if key:
if prefix:
self._param[self.label][prefix + '_' + key] = val
else:
self._param[self.label][key] = val
#---
@property
def matching(self):
return self.get_channel_parameter(key="matching")
@matching.setter
def matching(self,v):
self.get_channel_parameter(key="matching",val=v)
#---
@property
def channel(self):
return self.get_channel_parameter(key="channel")
@channel.setter
def channel(self,v):
self.set_channel_parameter(key="channel",val=v)
#---
@property
def prefix(self): return self.get_channel_parameter(key="prefix")
@prefix.setter
def prefix(self,v): self.set_channel_parameter(key="prefix",val=v)
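#--- illustrative sketch (assumed parameter dict, not executed here):
#--- get_channel_parameter() builds the lookup key as <prefix>_<key> when a prefix is given, e.g.
#---    param = {"response": {"channel": "IOD", "prefix": "iod", "iod_onset": 123}}
#---    ch = JuMEG_Epocher_Events_Channel_BaseBase(label="response", parameter=param)
#---    ch.channel                                           # -> "IOD"
#---    ch.get_channel_parameter(key="onset", prefix="iod")  # -> 123  (looks up "iod_onset")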
class JuMEG_Epocher_Events_Channel_Base(JuMEG_Epocher_Events_Channel_BaseBase):
""" base class to handel epocher template channel parameter
Parameter:
----------
label : first-level key in dictionary <None>
parameter: epocher template parameter as dictionary <None>
Example:
--------
iod_parameter= {"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}
}
response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=iod_parameter)
print(response.channel)
>> IOD
"""
def __init__(self,label=None,parameter=None):
super().__init__()
self.label = label
self._param = parameter
#---
@property
def matching_type(self): return self.get_channel_parameter(key="matching_type")
@matching_type.setter
def matching_type(self,v): self.set_channel_parameter(key="matching_type",val=v)
#---
@property
def type_input(self): return self.get_channel_parameter(key="type_input")
@type_input.setter
def type_input(self,v): self.set_channel_parameter(key="type_input",val=v)
#---
@property
def type_offset(self): return self.get_channel_parameter(key="type_offset")
@type_offset.setter
def type_offset(self,v): self.set_channel_parameter(key="type_offset",val=v)
#---
@property
def type_output(self): return self.get_channel_parameter(key="type_output")
@type_output.setter
def type_output(self,v): self.set_channel_parameter(key="type_output",val=v)
#---type_result: "hit","wrong","missed"
@property
def type_result(self): return self.get_channel_parameter(key="type_result")
@type_result.setter
def type_result(self,v): self.set_channel_parameter(key="type_result",val=v)
#---
@property
def channel_parameter(self): return self._param[self.channel]
#---
@property
def parameter(self): return self._param
@parameter.setter
def parameter(self,v): self._param = v
#---
def get_value_with_prefix(self,v): return self.get_channel_parameter(key=v,prefix=self.prefix)
#---
def get_parameter(self,k): return self._param[k]
#---
def set_parameter(self,k,v): self._param[k]=v
#---
@property
def time_pre(self): return self.get_parameter("time_pre")
@time_pre.setter
def time_pre(self,v): self.set_parameter("time_pre",v)
#---
@property
def time_post(self): return self.get_parameter("time_post")
@time_post.setter
def time_post(self,v): self.set_parameter("time_post",v)
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
class JuMEG_Epocher_Events_Channel_IOD(object):
"""class to handel epocher template IOD parameter
Parameter:
----------
label : first-level key in dictionary <iod>
parameter: epocher template parameter as dictionary <None>
Return:
--------
None
Example:
--------
input JSON dictionary
parameter={
"default":{
"Stim":{ "events":{"stim_channel":"STI 014","output":"onset","consecutive":true,"min_duration":0.0005,"shortest_event":1,"mask":0},
"event_id":84,"and_mask":255,"system_delay_ms":0.0,"early_ids_to_ignore":null},
"IOD":{ "events":{ "stim_channel":"STI 013","output":"onset","consecutive":true,"min_duration":0.0005,"shortest_event":1,"mask":0},
"and_mask":128,"window":[0.0,0.2],"counts":"first","system_delay_ms":0.0,"early_ids_to_ignore":null,"event_id":128,"and_mask":255}
},
"cond1":{
"postfix":"cond1",
"info" :" my comments",
"iod" :{"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}},
"StimImageOnset" : {"event_id":94},
"IOD" : {"event_id":128}
}
}
iod = JuMEG_Epocher_Events_Channel_IOD(label="response",parameter= parameter["condi1"])
print(iod.response.channel)
>> IOD
"""
def __init__(self,label="iod",parameter=None):
#super(JuMEG_Epocher_Events_Channel_IOD,self).__init__(label="iod",meter=None)
self._info = None
self.label = label
self._param = parameter
self.response = JuMEG_Epocher_Events_Channel_Base(label="response",parameter=parameter["iod"])
self.marker = JuMEG_Epocher_Events_Channel_Base(label="marker", parameter=parameter["iod"])
#---
@property
def iod_matching(self): return self.response.matching
@iod_matching.setter
def iod_matching(self,v): self.response.matching = v
#---
@property
def info(self): return self._info
@info.setter
def info(self,v): self._info = v
#---
@property
def parameter(self): return self._param
@parameter.setter
def parameter(self,v):
self._param = v
self.response.parameter = v["iod"]
self.marker.parameter = v["iod"]
#---
@property
def response_channel_parameter(self): return self._param[self.response.channel]
#---
@property
def marker_channel_parameter(self): return self._param[self.marker.channel]
#---
def info(self):
"""
logs parameter with logger.info
:return:
"""
logger.info( jumeg_base.pp_list2str(self._param) )
class JuMEG_Epocher_Events_Channel(JuMEG_Epocher_Events_Channel_Base):
'''
class for marker and response channel
'''
def __init__(self,label=None,parameter=None):
super().__init__(label=label,parameter=parameter)
self._info = None
#---
@property
def info(self): return self._info
@info.setter
def info(self,v): self._info = v
#---
@property
def stim_channel(self): return self._param[ self.channel ]["events"]["stim_channel"]
@property
def stim_output(self): return self._param[ self.channel ]["events"]["output"]
class JuMEG_Epocher_Events_Window(JuMEG_Epocher_Events_Channel):
"""
sub class, wrapper for this dict
"window_matching":{
"matching": true,
"channel": "ET_Events",
"window_onset": "iod_onset",
"window_offset": "resp_onset",
"event_type": "onset",
"prefix": "wet"
},
Parameter:
----------
param: label
param: parameter
"""
def __init__(self,label=None,parameter=None):
super().__init__(label=label,parameter=parameter)
@property
def window_onset(self): return self.get_channel_parameter(key="window_onset")
@property
def window_offset(self): return self.get_channel_parameter(key="window_offset")
@property
def event_type(self): return self.get_channel_parameter(key="event_type")
#---
class JuMEG_Epocher_ResponseMatching(JuMEG_Epocher_Basic):
"""
CLS to do response matching
for help refer to JuMEG_Epocher_ResponseMatching.apply() function
"""
#---
def __init__(self,raw=None,stim_df=None,stim_param=None,stim_type_input="onset",stim_prefix="stim",resp_df=None,
resp_param=None,resp_type_input="onset",resp_type_offset="offset",resp_prefix="resp",verbose=False,debug=False):
super().__init__()
self.column_name_list_update = ['div','type','index','counts']
self.column_name_list_extend = ['bads','selected','weighted_selected']
self.raw = raw
self.verbose = verbose
self.debug = debug
self.stim_df_orig = stim_df
self.stim_df = None
self.stim_param = stim_param
self.stim_type_input = stim_type_input
self.stim_prefix = stim_prefix
#---
self.resp_df_orig = resp_df
self.resp_df = None
self.resp_param = resp_param
self.resp_type_input = resp_type_input
self.resp_type_offset = resp_type_offset
self.resp_prefix = resp_prefix
#---
self.DataFrame = None
#---
@property
def div_column(self): return self.resp_prefix+"-div"
def reset_dataframe(self,max_rows):
"""
reset output pandas dataframe
add stimulus,response data frame columns and extend with prefix
init with zeros x MAXROWS
Parameters
----------
max_rows: number of dataframe rows
Returns
-------
dataframe[ zeros x MaxRows ]
"""
col=[]
col.extend( self.stim_df.columns.tolist() )
col.extend( self.resp_df.columns.tolist() )
for key in self.column_name_list_update:
if self.resp_prefix:
k = self.resp_prefix +'_'+ key
else: k = key
if k not in col:
col.append( k )
for k in self.column_name_list_extend:
if k not in col:
col.append(k)
return pd.DataFrame(0,index=range(max_rows),columns=col)
def update(self,**kwargs):
""" update CLS parameter
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
stim_df : pandas.DataFrame [None]
stimulus channel data frame
stim_param : dict() [None]
stimulus parameter from template
stim_type_input : string ["onset"]
data frame column name to process as stimulus input
stim_prefix : string ["iod"]
stimulus column name prefix e.g. to distinguish between different "onset" columns
resp_df : pandas.DataFrame [None]
response channel data frame
resp_param : dict() [None]
response parameter from template
resp_type_input : string ["onset"]
data frame column name to process as response input
resp_prefix : string ["iod"]
response column name prefix e.g. to distinguish between different "onset" columns
verbose : bool [False]
printing information debug
"""
self.raw = kwargs.get("raw",self.raw)
self.stim_param = kwargs.get("stim_param",self.stim_param)
self.stim_type_input = kwargs.get("stim_type_input",self.stim_type_input)
self.stim_prefix = kwargs.get("stim_prefix",self.stim_prefix)
self.resp_param = kwargs.get("resp_param",self.resp_param)
self.resp_type_input = kwargs.get("resp_type_input",self.resp_type_input)
self.resp_type_offset= kwargs.get("resp_type_offset",self.resp_type_offset)
self.resp_prefix = kwargs.get("resp_prefix",self.resp_prefix)
if "verbose" in kwargs.keys():
self.verbose = kwargs.get("verbose")
if "stim_df" in kwargs.keys():
self.stim_df = None
self.stim_df_orig = kwargs.get("stim_df") # df
if "resp_df" in kwargs.keys():
self.resp_df = None
self.resp_df_orig = kwargs.get("resp_df") # df
self.DataFrame = None
if not self.resp_type_offset:
self.resp_type_offset = self.resp_type_input
def _ck_errors(self):
""" checking for errors
Returns:
--------
False if error
"""
#--- ck errors
err_msg =[]
if (self.raw is None):
err_msg.append("ERROR no RAW obj. provided")
if (self.stim_df_orig is None):
err_msg.append("ERROR no Stimulus-Data-Frame obj. provided")
if (self.stim_param is None):
err_msg.append("ERROR no stimulus parameter obj. provided")
if (self.stim_type_input is None):
err_msg.append("ERROR no stimulus type input provided")
if (self.resp_df_orig is None):
err_msg.append("ERROR no Response-Data-Frame obj. provided")
if (self.resp_param is None):
err_msg.append("ERROR no response parameter obj. provided")
if (self.resp_type_input is None):
err_msg.append("ERROR no response type input provided")
try:
if err_msg :
raise(ValueError)
except:
logger.exception(jumeg_base.pp_list2str(err_msg,"JuMEG Epocher Response Matching ERROR check"))
return False
return True
def calc_max_rows(self,tsl0=None,tsl1=None,resp_event_id=None,early_ids_to_ignore=None):
"""
counting the necessary number of rows for dataframe in advance
deprecated
Parameter
---------
tsl0 : response window start in tsls <None>
tsl1 : response window end in tsls <None>
resp_event_id : response event ids <None>
early_ids_to_ignore : ignore these response ids if they are pressed too early <None>
Returns
-------
number of rows to setup the dataframe
"""
max_rows = 0
#--- get the reaction-time relevant part of the response df
resp_tsls = self.resp_df[ self.resp_type_input ]
for idx in self.stim_df.index :
# st_tsl_onset = self.stim_df[ self.stim_type_input ][idx]
st_window_tsl0 = self.stim_df[ self.stim_type_input ][idx] + tsl0
st_window_tsl1 = self.stim_df[ self.stim_type_input ][idx] + tsl1
if (st_window_tsl0 < 0) or (st_window_tsl1 < 0) : continue
#--- ck for toearly responses
if tsl0 > 0:
resp_index = self.find_toearly(tsl1=st_window_tsl0,early_ids_to_ignore=early_ids_to_ignore)
if isinstance(resp_index, np.ndarray):
max_rows+=1
continue
#--- find index of responses from window-start till end of res_event_type array [e.g. onset / offset]
resp_in_index = self.resp_df[ ( st_window_tsl0 <= resp_tsls ) & ( resp_tsls <= st_window_tsl1 ) ].index
#--- MISSED response
if resp_in_index.empty:
max_rows+=1
continue
#---count == all
#--- no response count limit e.g. eye-tracking saccades
#--- count defined resp ids ignore others
if self.resp_param['counts'] == 'all':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
max_rows+=idx_isin_true.size
#--- ck if first resp is True/False e.g. IOD matching
elif self.resp_param['counts'] == 'first':
max_rows+=1
#--- ck for response count limit
elif self.resp_param['counts']:
#--- found responses are <= allowed resp counts
max_rows+=resp_in_index.size
else:
#--- Wrong: found response counts > counts
max_rows+=resp_in_index.size
return max_rows
#---
def info(self):
"""
print info
Parameter
---------
Return
--------
prints statistic from column <response-prefix> -div
e.g. prints differences in tsls between stimulus and IOD onset
"""
logger.info("---> Info Response Matching:\n{}".format(self.DataFrame.to_string()))
ddiv = self.DataFrame[ self.resp_prefix + "_div" ]
zero_ep = ddiv[ddiv == 0 ]
n_zeros = ( ddiv == 0 ).sum()
tsldiv = abs( ddiv.replace(0,np.NaN) )
dmean = tsldiv.mean()
dstd = tsldiv.std()
dmin = tsldiv.min()
dmax = tsldiv.max()
tdmean,tdstd,tdmin,tdmax = 0,0,0,0
if self.raw:
if not np.isnan(dmean):
tdmean = self.raw.times[ int(dmean)]
if not np.isnan(dstd):
tdstd = self.raw.times[ int(dstd )]
if not np.isnan(dmin):
tdmin = self.raw.times[ int(dmin )]
if not np.isnan(dmax):
tdmax = self.raw.times[ int(dmax )]
logger.info("\n".join(["\n --> Response Matching time difference [ms]",
" -> bad epochs count : {:d}".format(n_zeros),
" -> bad epochs : {}\n".format(zero_ep),
" -> mean [ s ]: {:3.3f} std: {:3.3f} max: {:3.3f} min: {:3.3f}".format(tdmean,tdstd,tdmin,tdmax),
" -> mean [tsl]: {:3.3f} std: {:3.3f} max: {:3.3f} min: {:3.3f}".format(dmean,dstd,dmin,dmax),"-"*50]))
#---
def _set_stim_df_resp(self,df,stim_idx=None,df_idx=None,resp_idx=None,resp_type=0,counts=0):
"""set dataframe row
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe
"""
for col in self.stim_df.columns:
df[col][df_idx] = self.stim_df[col][stim_idx]
if self.is_number( resp_idx ):
for col in self.resp_df.columns:
df[col][df_idx] = self.resp_df[col][resp_idx]
df[self.resp_prefix +'_index'][df_idx] = resp_idx
df[self.resp_prefix + '_type'][df_idx] = resp_type
df[self.resp_prefix + "_div"][df_idx] = df[self.resp_type_input][df_idx] - df[self.stim_type_input][df_idx]
else:
for col in self.resp_df.columns:
df[col][df_idx] = 0
df[self.resp_prefix +'_index'][df_idx] = -1 # None/nan needs change to np.float
df[self.resp_prefix +'_type'][df_idx] = self.idx_missed # resp_type
df[self.resp_prefix + "_div"][df_idx] = 0 # None
df[self.resp_prefix + "_counts"][df_idx] = counts
return df
def _set_hit(self,df,stim_idx=None,df_idx=None,resp_idx=None):
""" set dataframe row for correct responses
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe index
"""
cnt = 1
if isinstance(resp_idx,(list)):
for ridx in resp_idx:
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=ridx,resp_type=self.idx_hit,counts=cnt)
cnt += 1
else:
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=resp_idx,resp_type=self.idx_hit,counts=cnt)
return df_idx
def _set_wrong(self,df,stim_idx=None,df_idx=None,resp_idx=None):
""" set dataframe row for wrong responses
Parameter
---------
df : dataframe
stim_idx : index <None>
df_idx : <None>
resp_idx : <None>
resp_type: <0>
counts : <0>
Return
--------
dataframe index
"""
cnt = 0
for ridx in resp_idx:
#df_idx += 1
cnt += 1
self._set_stim_df_resp(df,stim_idx=stim_idx,df_idx=df_idx,resp_idx=ridx,resp_type=self.idx_wrong,counts=cnt)
return df_idx
#---
def find_toearly(self,tsl0=0,tsl1=None,early_ids_to_ignore=None):
""" look for part of to early response in dataframe
Parameters
----------
tsl0 : start tsl range <None>
tsl1 : end tsl range <None>
early_ids_to_ignore : ignore these ids in the window tsl0 <= tsl < tsl1 <None>
Return
------
array Int64Index([ number of toearly responses ], dtype='int64')
"""
if self.resp_param["early_ids_to_ignore"] == 'all':
return
early_idx = self.resp_df[ ( tsl0 <= self.resp_df[ self.resp_type_input ] ) & ( self.resp_df[ self.resp_type_input ] < tsl1 ) ].index
#--- check both button press (onset) and release (offset)
if self.resp_type_input != self.resp_type_offset:
early_idx_off = self.resp_df[ ( tsl0 <= self.resp_df[ self.resp_type_offset] ) & ( self.resp_df[ self.resp_type_offset ] < tsl1 ) ].index
early_idx = np.unique( np.concatenate((early_idx,early_idx_off), axis=0) )
if early_idx.any():
if self.resp_param['early_ids_to_ignore']:
if early_ids_to_ignore.any():
evt_found = self.resp_df[self.resp_prefix + "_id"][ early_idx ].isin( early_ids_to_ignore ) # true or false
if evt_found.all():
return
found = np.where( evt_found == False )[0]
return found
else:
return early_idx
return None
#---
def apply(self,*kargs, **kwargs):
"""
apply response matching
matching correct responses with respect to <stimulus channel> <output type> (onset,offset)
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
stim_df : pandas.DataFrame [None]
stimulus channel data frame
stim_param : dict() [None]
stimulus parameter from template
stim_type_input : string ["onset"]
data frame column name to process as stimulus input
stim_prefix : string ["iod"]
stimulus column name prefix e.g. to distinguish between different "onset" columns
resp_df : pandas.DataFrame [None]
response channel data frame
resp_param : dict() [None]
response parameter from template
resp_type_input : string ["onset"]
data frame column name to process as response input
resp_prefix : string ["iod"]
response column name prefix e.g. to distinguish between different "onset" columns
verbose : bool [False]
printing information debug
Returns
-------
pandas.DataFrame
"""
self.update(*kargs,**kwargs)
if not self._ck_errors():
return
#--- ck RT window range
if ( self.resp_param['window'][0] >= self.resp_param['window'][1] ):
logger.error(" --> ERROR in response parameter window range: start: {} > end: {}".format(self.resp_param['window'][0],self.resp_param['window'][1]))
return
(r_window_tsl_start, r_window_tsl_end ) = self.raw.time_as_index( self.resp_param['window'] );
#--- get respose code -> event_id [int or string] as np array
resp_event_id = jumeg_base.range_to_numpy( self.resp_param['event_id'] )
#logger.info("events: {} stim_prefix: {} res_prefix: {}".format(resp_event_id,self.stim_prefix,self.resp_prefix))
#logger.info("stim_df : {}".format(self.stim_df_orig.columns))
#--- ck/get STIMULUS/MARKER channel event ids to ignore
if self.stim_param.get("event_ids_to_ignore"):
event_ids_to_ignore = jumeg_base.range_to_numpy( self.stim_param["event_ids_to_ignore"] )
evt_label = self.stim_param.get("event_prefix","stim")+"_id"
idx = np.where( ~self.stim_df_orig[evt_label].isin(event_ids_to_ignore) )[0]
self.stim_df = self.stim_df_orig.loc[idx,:]
else:
self.stim_df = self.stim_df_orig
#--- get response ids to ignore
if self.resp_param.get("event_ids_to_ignore"):
event_ids_to_ignore = jumeg_base.range_to_numpy( self.resp_param["event_ids_to_ignore"] )
resp_label = self.resp_param.get("event_prefix","resp" )+"_id"
idx = np.where( ~self.resp_df_orig[resp_label].isin(event_ids_to_ignore) )[0]
self.resp_df = self.resp_df_orig.loc[idx,:]
else:
self.resp_df = self.resp_df_orig
#--- ck if any toearly-id is defined, returns None if not
early_ids_to_ignore = None
if self.resp_param.get("early_ids_to_ignore"):
if self.resp_param["early_ids_to_ignore"] != 'all':
early_ids_to_ignore = jumeg_base.range_to_numpy( self.resp_param['early_ids_to_ignore'] )
#--- loop for all stim events
ridx = 0
df_idx = -1
#--- get the reaction-time relevant part of the response df
resp_tsls = self.resp_df[ self.resp_type_input ]
max_rows = self.calc_max_rows(tsl0=r_window_tsl_start,tsl1=r_window_tsl_end,resp_event_id=resp_event_id,early_ids_to_ignore=early_ids_to_ignore)
df = self.reset_dataframe( max_rows ) #len(self.stim_df.index)) #max_rows)
for idx in self.stim_df.index :
st_window_tsl0 = self.stim_df[ self.stim_type_input ][idx] + r_window_tsl_start
st_window_tsl1 = self.stim_df[ self.stim_type_input ][idx] + r_window_tsl_end
if (st_window_tsl0 < 0) or (st_window_tsl1 < 0) : continue
#logger.info(" -> resp param wtsl0: {} wtsl1:{} idx:{}".format(st_window_tsl0,st_window_tsl1,idx))
#--- too-early responses, e.g. response window [0.01,1.0] => 0 <= too-early window < 0.01
if r_window_tsl_start > 0:
resp_index = self.find_toearly(tsl1=st_window_tsl0,early_ids_to_ignore=early_ids_to_ignore)
if isinstance(resp_index, np.ndarray):
df_idx +=1
self._set_stim_df_resp(df,stim_idx=idx,df_idx=idx,resp_idx=ridx,resp_type=self.idx_toearly,counts=resp_index.size )
if self.debug:
logger.debug("--->ToEarly : {}\n --> stim df: {}".format(df_idx,self.stim_df))
continue
#--- find index of responses from window-start till end of res_event_type array [e.g. onset / offset]
resp_in_index = self.resp_df[ ( st_window_tsl0 <= resp_tsls ) & ( resp_tsls <= st_window_tsl1) ].index
if self.debug:
logger.debug(" -> resp in idx : {} \n -> tsls:\n{}\n -> {}".format(resp_in_index,resp_tsls,self.resp_df.loc[ resp_in_index,:]))
#--- MISSED response
if resp_in_index.empty:
df_idx += 1
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
continue
#---count == all
#--- no response count limit e.g. eye-tracking saccades
#--- count defined resp ids ignore others
#--- e.g.: find 11 in seq starting with 5 => 5,7,8,11 => resp_event_id =[11]
if self.resp_param['counts'] == 'all':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
#--- get index of True Hits
resp_in_idx_hits = resp_in_index[idx_isin_true]
if self.debug:
logger.debug("---> <counts == all>: idx:{}\n -> {}".format(idx_isin_true,resp_in_idx_hits))
df_idx += 1
if idx_isin_true.shape[0]:
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_idx_hits)
else:
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED in <counts == all>: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
elif self.resp_param['counts'] == 'any':
#--- get True/False index
idx_isin_true = np.where( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) )[0]
logger.info("ANY:\n {}\n event id: {}\n isin: {}".format(self.resp_df[self.resp_prefix + "_id"][ resp_in_index ],resp_event_id,idx_isin_true ))
df_idx += 1
if idx_isin_true.shape[0]:
#--- get index of True Hits
resp_in_idx_hits = resp_in_index[ idx_isin_true[0] ]
# if self.debug:
logger.info("---> <counts == any>: idx:{}\n -> {}".format(idx_isin_true,resp_in_idx_hits))
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_idx_hits)
else:
self._set_stim_df_resp( df,stim_idx=idx,df_idx=idx,resp_idx=None,resp_type=self.idx_missed,counts=0 )
if self.debug:
logger.debug("---> MISSED in <counts == all>: idx:{}\n -> {}".format(idx,self.resp_df.loc[resp_in_index,:]))
#--- ck if first resp is True/False e.g. IOD matching
elif self.resp_param['counts'] == 'first':
if ( self.resp_df[self.resp_prefix + "_id"][ resp_in_index[0] ] in resp_event_id ):
df_idx += 1
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=[ resp_in_index[0] ] )
else:
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=[resp_in_index[0]] )
#--- ck for response count limit
elif self.resp_param['counts']:
#--- found responses are <= allowed resp counts
if ( resp_in_index.size <= self.resp_param['counts'] ):
#--- HITS: all responses are in response event id
if np.all( self.resp_df[self.resp_prefix + "_id"][ resp_in_index ].isin( resp_event_id ) ) :
df_idx += 1
self._set_hit(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index )
else:
#--- Wrong: not all responses are in response event id =>found responses are > allowed resp counts
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index)
#--- Wrong: found response counts > counts
else:
df_idx += 1
self._set_wrong(df,stim_idx=idx,df_idx=idx,resp_idx=resp_in_index)
self.DataFrame = df
if self.debug:
self.info()
return df
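#--- illustrative call (a sketch, not executed here): stim_df / resp_df are the frames produced by
#--- events_find_events() with prefixes "img" and "iod", and the parameter dicts come from the
#--- condition template (window, counts, event_id, ...):
#---    rm = JuMEG_Epocher_ResponseMatching()
#---    df = rm.apply(raw=raw,
#---                  stim_df=stim_df, stim_param=stim_param,
#---                  stim_type_input="img_onset", stim_prefix="img",
#---                  resp_df=resp_df, resp_param=resp_param,
#---                  resp_type_input="iod_onset", resp_prefix="iod")
#--- the returned frame holds one row per marker event with iod_type / iod_div / iod_counts columns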
class JuMEG_Epocher_WindowMatching(JuMEG_Epocher_Basic):
"""
find events from the events dataframe
which occur in a window between <marker_onset> and <marker_offset> given by a marker window
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
marker_df : pandas.DataFrame [None]
data frame with onset/offset columns
window_onset : string ["onset"]
window-DataFrame column window onset
window_offset : string ["offset"]
window-DataFrame column window offset
event_df : pandas.DataFrame events
e.g. event/response codes from channel in <stim> <resp> group e.g.: STI 014/STI 013
event_type : DataFrame column label e.g: <prefix>onset
verbose : bool [False]
printing information debug
debug : bool [False]
Example Template:
-----------------
"SeResp": {
"postfix": "SeResp",
"time_pre": -0.2,
"time_post": 6.0,
"info": "search task IODonset and first buton press",
"marker": {
"channel": "StimImageOnset",
"type_input": "iod_onset",
"type_output": "iod_onset",
"prefix": "iod",
"type_result": "hit"
},
"response": {
"matching": true,
"channel": "RESPONSE",
"type_input": "resp_onset",
"type_offset": "resp_offset",
"prefix": "resp"
},
"window_matching":{
"matching": true,
"channel": "ETevents",
"window_onset": "iod_onset",
"window_offset": "resp_onset",
"event_type": "stim_onset",
"prefix": "winET"
},
"StimImageOnset": {
"event_id": 84
},
"RESPONSE": {
"events": {
"stim_channel": "STI 013",
"output": "onset",
"consecutive": true,
"min_duration": 0.0005,
"shortest_event": 1,
"initial_event": true,
"mask": null
},
"window": [
0.0,
6.0
],
"counts": "first",
"system_delay_ms": 0.0,
"early_ids_to_ignore": null,
"event_id": "1,2",
"and_mask": 3
},
"ETevents": {
"event_id": "250-260"
}
}
"""
def __init__(self,**kwargs):
super().__init__()
self.raw = None
self.window_onset = None
self.window_offset = None
self.marker_df = None
self.event_df = None
self.event_type = None
self.DataFrame = None
self.verbose = False
self.debug = False
def update(self,**kwargs):
"""
update CLS parameter
Parameters
----------
raw : raw obj [None]
used to calc time-window-range in TSLs
marker_df : pandas.DataFrame [None]
data frame with onset/offset columns
window_onset : string ["onset"]
window-DataFrame column window onset
window_offset : string ["offset"]
window-dataframe column window offset
events_df : pandas.DataFrame events
e.g. event/response codes from channel in <stim> <resp> group e.g.: STI 014/STI 013
verbose : bool [False]
printing information debug
debug : bool [False]
"""
self.raw = kwargs.get("raw",self.raw)
self.window_onset = kwargs.get("window_onset",self.window_onset)
self.window_offset = kwargs.get("window_offset",self.window_offset)
self.event_type = kwargs.get("event_type",self.event_type)
if "verbose" in kwargs.keys():
self.verbose = kwargs.get("verbose")
if "marker_df" in kwargs.keys():
self.marker_df = kwargs.get("marker_df") # df
if "event_df" in kwargs.keys():
self.event_df = kwargs.get("event_df") # df
self.DataFrame = None
#---
def info(self):
"""
print info
Parameter
---------
Return
--------
prints DataFrame
"""
logger.info("---> Info Window Matching:\n"+
" --> window onset : {}".format(self.window_onset) +
" --> window offset: {}".format(self.window_onset) +
" --> event type : {}".format(self.event_type) +
" --> DataFrame:\n{}".format(self.DataFrame.to_string()))
def apply(self,**kwargs):
self.update(**kwargs)
#--- get onset or offsets from events
evt = self.event_df[self.event_type]
cl1 = self.event_df.columns.tolist()
cl2 = self.marker_df.columns.tolist()
cl = []
cl.extend(cl1)
cl.extend(cl2)
dfs = []
for idx in self.marker_df.index:
wdf = self.marker_df.iloc[idx] # get series
c1 = self.event_df[ self.event_type ] >= wdf[ self.window_onset ]
c2 = self.event_df[ self.event_type ] < wdf[ self.window_offset ]
df = self.event_df[c1 & c2]
if df.empty: continue
#--- numpy
d = np.zeros([len(df),len(cl)],dtype=np.int32)
d[:,0:len(cl1)] += df.get_values()
d[:,len(cl1): ] += wdf.get_values()
dfs.append(pd.DataFrame(d,columns=cl,index=df.index.get_values()))
# print("HITS df last:\n{}".format(dfs[-1]))
self.DataFrame = pd.concat(dfs)
self.DataFrame.reset_index(drop=False,inplace=True)
self.DataFrame["selected"]=1
#---
if self.debug:
self.info()
return self.DataFrame
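#--- illustrative call (a sketch, not executed here): marker_df provides the window columns
#--- (e.g. "iod_onset"/"resp_onset" after response matching), event_df the events to collect
#--- (e.g. eye-tracker codes with a "stim_onset" column):
#---    wm = JuMEG_Epocher_WindowMatching()
#---    df = wm.apply(raw=raw, marker_df=marker_df, event_df=event_df,
#---                  window_onset="iod_onset", window_offset="resp_onset",
#---                  event_type="stim_onset")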
class JuMEG_Epocher_Events(JuMEG_Epocher_HDF,JuMEG_Epocher_Basic):
'''
Main class to find events
-> reading epocher event template file
-> for each condition find events using mne.find_events function
-> looking for IOD and response matching
-> store results into pandas dataframes
-> save as hdf5 for later to generate epochs,averages
Example
--------
from jumeg.epocher.jumeg_epocher import jumeg_epocher
from jumeg.epocher.jumeg_epocher_epochs import JuMEG_Epocher_Epochs
jumeg_epocher_epochs = JuMEG_Epocher_Epochs()
jumeg_epocher.template_path = '.'
condition_list = ['test01','test02']
fname = "./test01.fif"
param = { "condition_list":condition_list,
"do_run": True,
"template_name": "TEST01",
"save": True
}
(_,raw,epocher_hdf_fname) = jumeg_epocher.apply_events_to_hdf(fname,**param)
'''
#---
def __init__(self):
super(JuMEG_Epocher_Events, self).__init__()
self.parameter= None
self.iod = None
self.stimulus = None
self.response = None
self.window = None
self.event_data_parameter={"events":{
"stim_channel" : "STI 014",
"output" : "onset",
"consecutive" : True,
"min_duration" : 0.0001,
"shortest_event" : 1,
"initial_event" : True,
"mask" : None
},
"event_id" : None,
"and_mask" : None,
"system_delay_ms" : 0.0
}
self.ResponseMatching = JuMEG_Epocher_ResponseMatching()
self.WindowMatching = JuMEG_Epocher_WindowMatching()
#---
@property
def event_data_stim_channel(self): return self.event_data_parameter["events"]["stim_channel"]
@event_data_stim_channel.setter
def event_data_stim_channel(self,v): self.event_data_parameter["events"]["stim_channel"]=v
#---
def channel_events_to_dataframe(self):
"""
find events from groups
mne.pick_types(stim=True)
stimulus group <stim> [STI 014 TRIGGER, ET_events, ...]
mne.pick_types(resp=True)
response group [STI 013 RESPONSE,...]
store event information as pandas dataframe and save in hdf5 obj key: /events
"""
#--- stimulus channel group
for ch_idx in jumeg_base.picks.stim(self.raw):
#print(ch_idx)
ch_label = jumeg_base.picks.picks2labels(self.raw,ch_idx)
#print(ch_label)
self.event_data_stim_channel = ch_label
self._channel_events_dataframe_to_hdf(ch_label,"stim")
#--- response channel group
for ch_idx in jumeg_base.picks.response(self.raw):
ch_label = jumeg_base.picks.picks2labels(self.raw,ch_idx)
self.event_data_stim_channel = ch_label
self._channel_events_dataframe_to_hdf(ch_label,"resp")
#---
def _channel_events_dataframe_to_hdf(self,ch_label,prefix):
""" save channel event dataframe to HDF obj
Parameter
---------
string : channel label e.g.: 'STI 014'
pd dataframe:
dict : info dict with parameter
Results
-------
None
"""
self.event_data_stim_channel = ch_label
#print(self.event_data_parameter)
found = self.events_find_events(self.raw,prefix=prefix,**self.event_data_parameter)
#print(found)
if found:
df = found[0]
info = found[1]
#if type(df) == "<class 'pandas.core.frame.DataFrame'>":
key = self.hdf_node_name_channel_events +"/"+ ch_label
storer_attrs = {'info_parameter': info}
self.hdf_obj_update_dataframe(df.astype(np.int64),key=key,**storer_attrs )
# https://stackoverflow.com/questions/17468878/pandas-python-how-to-count-the-number-of-records-or-rows-in-a-dataframe
if self.verbose:
ids = pd.unique(df.iloc[:,0])
label = df.columns[0]
ids.sort()
msg = [ "---> Events in DataFrame column: {}".format(label) ]
for id in ids:
df_id = df[ df[ label ] == id ]
msg.append(" -> id: {:4d} counts: {:5d}".format(id,len(df_id.index)) )
logger.info("\n".join(msg))
#---
def apply_iod_matching(self,raw=None):
'''
apply image-onset-detection (IOD),
generate pandas dataframe with columns for iod
e.g. from template parameter for a condition
"iod" :{"marker" :{"channel":"StimImageOnset","type_input":"img_onset","prefix":"img"},
"response":{"matching":true,"channel":"IOD","type_input":"iod_onset","prefix":"iod"}},
Parameters
----------
raw: obj [None]
mne.raw obj
Returns
--------
pandas dataframe
columns with
marker-prefix => id,onset,offset
response-prefix => type,div,id,onset,offset,index,counts
additional columns => bads,selected,weighted_selected
marker info
'''
if not self.iod.iod_matching: return None,None
#--- marker events .e.g. STIMULUS
# logger.info(self.iod.marker_channel_parameter)
try:
mrk_df,mrk_info = self.events_find_events(raw,prefix=self.iod.marker.prefix,**self.iod.marker_channel_parameter)
except:
logger.warning(" --> WARNING: IOD Matching: no events found: \n{}\n ".format(self.event_data_parameter))
return None,None
#--- response events e.g. IOD
resp_df,resp_info = self.events_find_events(raw,prefix=self.iod.response.prefix,**self.iod.response_channel_parameter)
if resp_info.get("system_delay_is_applied"):
mrk_info["system_delay_is_applied"] = True
df = self.ResponseMatching.apply(raw=raw,stim_df=mrk_df,resp_df=resp_df,
stim_param = deepcopy(self.iod.marker_channel_parameter),
stim_type_input = self.iod.marker.type_input,
stim_prefix = self.iod.marker.prefix,
resp_param = deepcopy(self.iod.response_channel_parameter),
resp_type_input = self.iod.response.type_input,
resp_prefix = self.iod.response.prefix,
verbose = self.verbose,
debug = self.debug
)
if self.verbose:
if "iod_div" in df.columns:
logger.info(" -> Stimulus Onset and IOD div [tsl] mean: {0:3.1f} std:{1:3.1f}".format(df["iod_div"].mean(),df["iod_div"].std()))
return df,mrk_info
#---
def update_parameter(self,param=None):
'''update parameter
-> init with default parameter
-> merge and overwrite defaults with the parameter defined for the condition
-> init special objs (marker,response,iod) if defined
Parameter
---------
param: <None>
'''
self.parameter = None
self.parameter = deepcopy( self.template_data['default'] )
self.parameter = self.template_update_and_merge_dict( self.parameter,param )
#---
self.marker = JuMEG_Epocher_Events_Channel(label="marker",parameter=self.parameter)
self.response = JuMEG_Epocher_Events_Channel(label="response",parameter=self.parameter)
self.window = JuMEG_Epocher_Events_Window(label="window_matching",parameter=self.parameter)
self.iod = JuMEG_Epocher_Events_Channel_IOD(label="iod",parameter=self.parameter)
#---
def events_store_to_hdf(self,fname=None,raw=None,condition_list=None,overwrite_hdf=False,
template_path=None,template_name=None,hdf_path=None,verbose=False,debug=False):
"""
find & store epocher data to hdf5:
-> reading parameter from the epocher template file
-> find events from raw-obj using mne.find_events
-> apply response matching if true
-> save results in pandas dataframes & HDF format
Parameter
---------
fname : string, fif file name <None>
raw : raw obj <None>
condition_list: list of conditions to process
select special conditions from epocher template
default: <None> , will process all defined in template
overwrite_hdf : flag for overwriting output HDF file <False>
template_path : path to jumeg epocher templates
template_name : name of template e.g: experiment name
hdf_path : path to hdf file <None> if None use fif-file path
verbose : flag, <False>
debug : flag, <False>
Results
-------
raw obj
string: FIF file name
"""
#--- read template file
if template_name:
self.template_name = template_name
if template_path:
self.template_path = template_path
if verbose:
self.verbose = verbose
if debug:
self.debug = debug
self.template_update_file()
self.raw,fname = jumeg_base.get_raw_obj(fname,raw=raw)
#--- init obj
self.hdf_obj_init(raw=self.raw,hdf_path=hdf_path,overwrite=overwrite_hdf)
self.channel_events_to_dataframe()
if not condition_list :
condition_list = self.template_data.keys()
#--- condi loop
# for condi, param, in self.template_data.items():
for condi in condition_list:
param = self.template_data.get(condi)
#--- check if condi is defined
if not param:
msg = "---> no condition key found in template data\n"
msg+= " -> condition: {}\n".format(condi)
msg+= " -> template file: {}\n".format(self.template_name)
if self.debug:
msg+=" -> template data:\n"+ self.pp_list2str(self.template_data)
logger.exception(msg)
#if self.exit_error_in_condition:
#sys.exit()
#--- check for real condition
if condi == 'default': continue
#--- check for condition in list
if condi not in self.template_data.keys(): continue
#--- update & merge condi self.parameter with defaults
self.update_parameter(param=param)
iod_data_frame = None
logger.info("---> Epocher start store events into HDF\n --> condition: "+ condi)
if not self.marker.channel_parameter: continue
#--- stimulus init dict's & dataframes
marker_info = dict()
marker_data_frame = pd.DataFrame()
response_data_frame = pd.DataFrame()
response_info = dict()
window_data_frame = pd.DataFrame()
window_info = dict()
if self.verbose:
logger.info(' --> EPOCHER Template: %s Condition: %s' %(self.template_name,condi)+
'\n -> find events and epochs, save epocher output in HDF5 format')
#self.pp(self.parameter,head=" -> parameter")
#--- iod matching: check if true and if channel == stimulus channel
if self.iod.iod_matching:
iod_data_frame,iod_info = self.apply_iod_matching(raw=self.raw)
if iod_data_frame is None: continue
marker_data_frame = iod_data_frame
marker_info = iod_info
#--- copy iod df to res or stim df
# if self.response.matching:
# if ( self.iod.marker.channel != self.marker.channel ):
# response_data_frame = iod_data_frame
# response_info = iod_info
#--- ck if not stimulus_data_frame
if marker_data_frame.empty :
logger.info(" -> MARKER CHANNEL -> find events => condition: "+ condi +"\n ---> marker channel: "+ self.marker.channel)
if self.verbose:
logger.info(self.pp_list2str( self.marker.parameter,head=" -> Marker Channel parameter:"))
marker_data_frame,marker_info = self.events_find_events(self.raw,prefix=self.marker.prefix,**self.marker.channel_parameter)
#---
if marker_data_frame.empty: continue
marker_data_frame['bads'] = 0
marker_data_frame['selected'] = 0
marker_data_frame['weighted_selected']= 0
if self.verbose:
logger.info(" -> Marker Epocher Events Data Frame [marker channel]: "+ condi)
#--- Marker Matching task
#--- match between stimulus and response or vice versa
#--- get all response events for condition e.g. button press 4
#--- apply window matching : find first,all responses in window
if self.response.matching :
logger.info(" --> Marker Matching -> matching marker & response channel: {}\n".format(condi)+
" -> marker channel : {}\n".format(self.marker.channel)+
" -> response channel : {}".format(self.response.channel) )
#--- look for all responses => 'event_id' = None
if response_data_frame.empty:
res_channel_param = deepcopy(self.response.channel_parameter)
res_channel_param['event_id'] = None
response_data_frame,response_info = self.events_find_events(self.raw,prefix=self.response.prefix,**res_channel_param)
if self.verbose:
logger.info(self.pp_list2str(self.response.parameter,
head="---> Response Epocher Events Data Frame [response channel] : " + self.response.channel))
#logger.info(marker_data_frame)
#--- update stimulus epochs with response matching
marker_data_frame = self.ResponseMatching.apply(raw = self.raw,
stim_df = marker_data_frame,
stim_param = deepcopy(self.marker.channel_parameter),
stim_type_input = self.marker.type_input,
stim_prefix = self.marker.prefix,
#---
resp_df = response_data_frame,
resp_param = deepcopy(self.response.channel_parameter),
resp_type_input = self.response.type_input,
resp_type_offset= self.response.type_offset,
resp_prefix = self.response.prefix,
#---
verbose = self.verbose
)
#--- window matching, find events in window
if self.window.matching:
logger.info("---> window matching => marker data frame:\n{}".format( marker_data_frame.to_string() ))
event_df = self.hdf_obj_get_channel_dataframe(self.window.stim_channel)
logger.info("---> window matching => event type: {}\n -> DataFrame:\n{}".format(self.window.event_type,event_df.to_string()))
if not event_df.empty:
window_data_frame = self.WindowMatching.apply(raw=self.raw,verbose=self.verbose,
marker_df = marker_data_frame,
window_onset = self.window.window_onset,
window_offset = self.window.window_offset,
event_df = event_df,
event_type = self.window.event_type
)
if self.verbose:
type = self.response.prefix +'_type'
hits = marker_data_frame[type]
idx = np.where( hits == self.rt_type_as_index( self.marker.type_result ) )[0]
msg=[" -> Response Matching DataFrame : " + condi,
" -> correct : {:d} / {:d}".format(len(idx),len(marker_data_frame.index)),
" -> marker type : {}".format(type),
"-"*40,
"{}".format( marker_data_frame.to_string() )
]
logger.info("\n".join(msg))
else:
#--- no response matching: label all marker events with the default result type, e.g. hits
mrk_type = self.marker.prefix +'_type'
if mrk_type not in marker_data_frame :
marker_data_frame[ mrk_type ] = self.rt_type_as_index( self.marker.type_result )
if self.verbose:
mrk_type = self.marker.prefix +'_type'
hits = marker_data_frame[mrk_type]
idx = np.where( hits == self.rt_type_as_index( self.marker.type_result ) )[0]
msg=[" -> Marker Matching DataFrame : " + condi,
" -> correct : {:d} / {:d}".format(len(idx),len(marker_data_frame.index)),
" -> marker type : {}".format(mrk_type),
"-"*40,
"{}".format( marker_data_frame.to_string() )
]
logger.info("\n".join(msg))
key = self.hdf_node_name_epocher +'/'+condi
storer_attrs = {'epocher_parameter': self.parameter,'info_parameter':marker_info}
if self.window.matching:
self.hdf_obj_update_dataframe(window_data_frame.astype(np.int32),key=key,**storer_attrs)
else:
#--- marker dataframe
self.hdf_obj_update_dataframe(marker_data_frame.astype(np.int32),key=key,**storer_attrs )
self.HDFobj.close()
logger.info("---> DONE save epocher data into HDF5 : " + self.hdf_filename)
return self.raw,fname
#---
def events_find_events(self,raw,prefix=None,**param):
"""find events with <mne.find_events()>
Parameters
---------
raw : raw obj
prefix: prefix for columns <None>
param : parameter like <**kwargs>
{'event_id': 40, 'and_mask': 255,
'events': {'consecutive': True, 'output':'step','stim_channel': 'STI 014',
'min_duration':0.002,'shortest_event': 2,'mask': 0}
}
Returns
--------
pandas data-frame with epoch event structure for e.g. stimulus, response channel
id : event id
offset : np array with TSL event code offset
onset : np array with TSL event code onset
if <prefix> columns are labeled with <prefix>
e.g.: prefix=img => img_onset
dict() with event structure for stimulus or response channel
sfreq : sampling frequency => raw.info['sfreq']
duration : {mean,min,max} in TSL
system_delay_is_applied : True/False
--> if true <system_delay_ms> converted to TSLs and added to the TSLs in onset,offset
(TSL => timeslices,samples)
"""
if raw is None:
logger.error("ERROR in <get_event_structure: raw obj is None")
return
#---
df = pd.DataFrame(columns = self.data_frame_stimulus_cols)
ev_id_idx = np.array([])
ev_onset = np.array([])
ev_offset = np.array([])
#---add prefix to col name
if prefix:
for k in self.data_frame_stimulus_cols:
df.rename(columns={k: prefix+'_'+k},inplace=True )
col_id = prefix+"_id"
col_onset = prefix+"_onset"
col_offset= prefix+"_offset"
else:
col_id = "id"
col_onset = "onset"
col_offset= "offset"
#---
events = deepcopy(param['events'])
events['output'] = 'step'
# self.pp( events )
#--- check if channel label in raw
if not jumeg_base.picks.labels2picks(raw,events["stim_channel"]):
return df,dict()
if self.verbose:
logger.debug("---> mne.find_events:\n"+ self.pp_list2str(events))
ev = mne.find_events(raw, **events) #-- return int64
# self.pp(ev)
#--- apply and mask e.g. 255 get the first 8 bits in Trigger channel
if param['and_mask']:
ev[:, 1:] = np.bitwise_and(ev[:, 1:], param['and_mask'])
ev[:, 2:] = np.bitwise_and(ev[:, 2:], param['and_mask'])
ev_onset = np.squeeze( ev[np.where( ev[:,2] ),:]) # > 0
ev_offset = np.squeeze( ev[np.where( ev[:,1] ),:])
if param['event_id']:
ev_id = jumeg_base.str_range_to_numpy(param['event_id'],exclude_zero=True)
#print(ev_id)
evt_ids=np.where(np.in1d(ev[:,2],ev_id))
#print(evt_ids)
#--- check if code in events
if len( np.squeeze(evt_ids) ):
ev_id_idx = np.squeeze( np.where( np.in1d( ev_onset[:,2],ev_id )))
if ( ev_id_idx.size > 0 ):
ev_onset = ev_onset[ ev_id_idx,:]
ev_offset= ev_offset[ev_id_idx,:]
else:
logger.warning('===> No such event code(s) found (ev_id_idx) -> event: ' + str( param['event_id'] ))
return
else:
logger.warning('===> No such event code(s) found (ev_id) -> event: ' + str(param['event_id']))
return
#---- use all event ids
if ( ev_onset.size == 0 ):
logger.warning('===> No such event code(s) found -> event: ' + str(param['event_id']))
return
#--- apply system delay if defined, e.g. auditory stimuli take ~20 ms to reach the subject's ears
if param['system_delay_ms']:
system_delay_tsl = raw.time_as_index( param['system_delay_ms']/1000 ) # calc in sec
ev_onset[:, 0] += system_delay_tsl
ev_offset[:, 0]+= system_delay_tsl
system_delay_is_applied = True
else:
system_delay_is_applied = False
#-- avoid invalid index/dimension error if last offset is none
df[col_id] = ev_onset[:,2]
df[col_onset] = ev_onset[:,0]
df[col_offset] = np.zeros( ev_onset[:,0].size,dtype=np.int64 )
div = np.zeros( ev_offset[:,0].size )
try:
if ( ev_onset[:,0].size >= ev_offset[:,0].size ):
div = ev_offset[:,0] - ev_onset[:ev_offset[:,0].size,0]
df[col_offset][:ev_offset[:,0].size] = ev_offset[:,0]
else:
idx_max = ev_onset[:,0].size
div = ev_offset[:idx_max,0] - ev_onset[:,0]
df[col_offset][:] = ev_offset[:idx_max,0]
except:
assert "ERROR dims onset offset will not fit\n"
return df,dict( {
'sfreq' : raw.info['sfreq'],
'duration' :{'mean':np.rint(div.mean()),'min':div.min(),'max':div.max()},
'system_delay_is_applied' : system_delay_is_applied
} )
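#--- worked example (illustrative values): with output='step' mne.find_events returns rows of
#--- [sample, previous_value, new_value]; applying and_mask=255 keeps only the lower 8 trigger bits,
#--- e.g. a raw trigger value of 340 (0b101010100) & 255 -> 84. Rows with a non-zero new_value
#--- (column 2) are taken as onsets, rows with a non-zero previous_value (column 1) as offsets.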
jumeg_epocher_events = JuMEG_Epocher_Events()
| 40.140322
| 159
| 0.539331
|
56a9f69718f000d2e1f319317944f285ef18e9f4
| 4,524
|
py
|
Python
|
bulletin_board/views.py
|
onursahil/BERT_search_engine
|
50b10ee3bbaeceeb5879a76ef43463cfa7132ca2
|
[
"MIT"
] | null | null | null |
bulletin_board/views.py
|
onursahil/BERT_search_engine
|
50b10ee3bbaeceeb5879a76ef43463cfa7132ca2
|
[
"MIT"
] | null | null | null |
bulletin_board/views.py
|
onursahil/BERT_search_engine
|
50b10ee3bbaeceeb5879a76ef43463cfa7132ca2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# from .models import Post
from django.shortcuts import redirect
import datetime
import pandas as pd
import numpy as np
import re
import fasttext
import fasttext.util
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk, scan
from elasticsearch_dsl import Search
from elasticsearch_dsl import Q
from ast import literal_eval
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
# Create your views here.
def home(request):
return render(request, 'bulletin_board/home.html')
def results(request):
search_q = request.GET.get('search')
search_query = []
search_keyword = {
'keyword': search_q
}
search_query.append(search_keyword)
fasttext.util.download_model('tr', if_exists='ignore')
try:
ft = fasttext.load_model('cc.tr.300.bin')
except Exception as exc:
print("Couldn't load the fastText model:", exc)
raise
# DOCUMENT SEARCH
documents = document_search(search_q, ft)
print("\n")
print("DOCUMENT SEARCH RESULT")
print(documents)
# QUESTION & ANSWER
answer, score = question_answer(search_q, documents)
answer = answer.capitalize()
answer_query = []
answer_d = {
'answer_key': answer,
'score': score
}
answer_query.append(answer_d)
print("\n")
display_data = []
for index, row in documents.iterrows():
document_dict = {}
document_dict['document_id'] = row['document_id']
document_dict['topic_id'] = row['topic_id']
document_dict['document_name'] = row['document_name']
document_dict['topic'] = row['topic']
document_dict['content'] = row['content']
document_dict['score'] = row['score']
display_data.append(document_dict)
context = {
'posts': display_data,
'search_q': search_query,
'answer_key': answer_query
}
return render(request, 'bulletin_board/results.html', context)
def question_answer(input_query, document_search_result):
model = AutoModelForQuestionAnswering.from_pretrained("kuzgunlar/electra-turkish-qa")
tokenizer = AutoTokenizer.from_pretrained("kuzgunlar/electra-turkish-qa")
qa=pipeline('question-answering', model=model, tokenizer=tokenizer)
qa_content = ' '.join(row['topic_content_result'] for index, row in document_search_result[:5].iterrows())
qa_result = qa(question=input_query, context=qa_content)
return qa_result['answer'], qa_result['score']
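# Illustrative direct call (not executed here); a Hugging Face question-answering pipeline
# returns a dict with "answer", "score", "start" and "end" keys, e.g.:
#   qa(question="Hesap nasil acilir?", context="... kisa bir Turkce metin ...")
#   # -> {"answer": "...", "score": <float>, "start": <int>, "end": <int>}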
def semantic_search(input_query):
script_query = {
"script_score": {
"query": {
"match_all": {}
},
"script": {
"source": "cosineSimilarity(params.query_vector, 'topic_content_vector') + 1.0",
"params": {
"query_vector": input_query
}
}
}
}
client = Elasticsearch(hosts=["localhost"], http_auth=('elastic', 'changeme'))
response = client.search(
index='akbank_data',
body={
"query": script_query
},
size=999
)
all_hits = response['hits']['hits']
document_id_list = []
topic_id_list = []
document_name_list = []
topic = []
content = []
topic_content_result = []
search_score = []
for i in range(len(all_hits)):
score = all_hits[i]['_score']
document_id = all_hits[i]['_source']
document_id = document_id['document_id']
topic_id = all_hits[i]['_source']
topic_id = topic_id['topic_id']
document_name = all_hits[i]['_source']
document_name = document_name['document_name']
content_text = all_hits[i]['_source']
content_text = content_text['content']
topic_text = all_hits[i]['_source']
topic_text = topic_text['topic']
topic_content = all_hits[i]['_source']
topic_content = topic_content['topic_content']
document_id_list.append(document_id)
topic_id_list.append(topic_id)
document_name_list.append(document_name)
topic.append(topic_text)
content.append(content_text)
topic_content_result.append(topic_content)
search_score.append(score)
document_search_df = pd.DataFrame()
document_search_df['document_id'] = document_id_list
document_search_df['topic_id'] = topic_id_list
document_search_df['document_name'] = document_name_list
document_search_df['topic'] = topic
document_search_df['content'] = content
document_search_df['topic_content_result'] = topic_content_result
document_search_df['score'] = search_score
return document_search_df
def document_search(keyword, ft):
input_query_vector = ft.get_sentence_vector(keyword)
document_search_result = semantic_search(input_query_vector)
return document_search_result
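# Assumed index layout (the mapping is not created in this module): the "akbank_data" index is
# expected to hold a 300-dim dense_vector field "topic_content_vector" matching the fastText
# cc.tr.300 sentence vectors, e.g. (Elasticsearch 7.x mapping, shown for illustration only):
#   {"mappings": {"properties": {"topic_content_vector": {"type": "dense_vector", "dims": 300}}}}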
| 27.925926
| 107
| 0.717507
|
e661afbb08e56d09c4108b37bac00e8d6df3a96d
| 1,236
|
py
|
Python
|
locking/forms.py
|
edufrick/django-locking
|
a342422114ab011b97fd2d91f432453822d0b1f8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
locking/forms.py
|
edufrick/django-locking
|
a342422114ab011b97fd2d91f432453822d0b1f8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
locking/forms.py
|
edufrick/django-locking
|
a342422114ab011b97fd2d91f432453822d0b1f8
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
""" locking.forms
Forms class for the django-locking project.
"""
from __future__ import absolute_import
from django import forms
from django.contrib.contenttypes.models import ContentType
from locking.models import Lock
class LockingForm(forms.ModelForm):
"""
Clean the form to enforce orm locking before saving the object. This will
only work if you set lock_type to 'hard' in the locking.models file.
"""
def clean(self):
self.cleaned_data = super(LockingForm, self).clean()
try:
content_type = ContentType.objects.get_for_model(self.obj)
lock = Lock.objects.get(
entry_id=self.obj.id,
app=content_type.app_label,
model=content_type.model,
)
except Exception:
return self.cleaned_data
if lock.is_locked:
if lock.locked_by != self.request.user and lock.lock_type == "hard":
raise forms.ValidationError(
"You cannot save this object because it is locked by user %s for roughly %s more minute(s)."
% (lock.locked_by.username, lock.lock_seconds_remaining // 60)
)
return self.cleaned_data
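# Illustrative subclass (a sketch, not part of this module): clean() expects ``self.obj`` and
# ``self.request`` to be set by the concrete form, e.g. in a hypothetical Article form:
#
# class ArticleForm(LockingForm):
#     class Meta:
#         model = Article          # hypothetical model
#         fields = "__all__"
#
#     def __init__(self, *args, request=None, **kwargs):
#         super().__init__(*args, **kwargs)
#         self.request = request   # user performing the save
#         self.obj = self.instance # object whose lock is checked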
| 32.526316
| 112
| 0.631068
|
1dbd01beddffd993e69a3158910183d0e606d10c
| 7,306
|
py
|
Python
|
parallel_RRT/parallel_RRT_test_Dask_numba.py
|
AnsSUN/Parallel-Computing-of-Rapidly-exploring-random-tree-RRT-
|
4ec2c37f8cd214127421f90f2a0ce2c275f37d94
|
[
"MIT"
] | 3
|
2019-11-07T06:30:48.000Z
|
2021-06-06T08:49:36.000Z
|
parallel_RRT/parallel_RRT_test_Dask_numba.py
|
AnsSUN/Parallel-Computing-of-Rapidly-exploring-random-tree-RRT-
|
4ec2c37f8cd214127421f90f2a0ce2c275f37d94
|
[
"MIT"
] | 1
|
2021-03-01T08:37:04.000Z
|
2021-03-05T23:06:17.000Z
|
parallel_RRT/parallel_RRT_test_Dask_numba.py
|
AnsSUN/Parallel-Computing-of-Rapidly-exploring-random-tree-RRT-
|
4ec2c37f8cd214127421f90f2a0ce2c275f37d94
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from numba import cuda
from tqdm import tqdm
import matplotlib.pyplot as plt
TPB = 256 # number of threads in a block
# device function_NN
@cuda.jit(device=True)
def euc_distance_2d_device(x1,y1,x2,y2):
d = math.sqrt((x2-x1)**2+(y2-y1)**2)
return d
@cuda.jit()
def distanceKernel(d_out,x,y,d_V,nov):
i = cuda.blockIdx.x*cuda.blockDim.x + cuda.threadIdx.x
if i < nov:
d_out[i] = euc_distance_2d_device(x,y,d_V[0,i],d_V[1,i])
# wrapper function_NN
def dArray(x,y,V,nov):
d_V = cuda.to_device(V) # copies the input data to a device array on the GPU
d_distance = cuda.device_array(nov) # creates an empty array to hold the output
BPG = (nov + TPB - 1)//TPB # computes number of blocks
distanceKernel[BPG,TPB](d_distance,x,y,d_V,nov)
return d_distance.copy_to_host()
# kernel for CC
@cuda.jit()
def ccKernel(d_out,x,y,d_O,all_radii):
i = cuda.blockIdx.x*cuda.blockDim.x + cuda.threadIdx.x
#flag = 1
if i < all_radii.size:# and flag ==1:
d_out[i] = euc_distance_2d_device(x,y,d_O[0,i],d_O[1,i])>all_radii[i] # should be 1 for no collision
# flag = d_out[i]
# wrapper function_CC
def dArray_CC(x,y,obs_coors,allowable_radii):
noo = allowable_radii.size
d_all_radii = cuda.to_device(allowable_radii) # copies the input data to a device array on the GPU
d_O = cuda.to_device(obs_coors) # copies the input data to a device array on the GPU
d_collision = cuda.device_array(noo) # creates an empty array to hold the output
BPG = (noo + TPB - 1)//TPB # computes number of blocks
ccKernel[BPG,TPB](d_collision,x,y,d_O,d_all_radii)
return d_collision.copy_to_host()
def euc_distance_2d(x1,y1,x2,y2):
return np.sqrt((x2-x1)**2+(y2-y1)**2)
def nearest_neighbor_2d_parallel(x,y,V,nov):
distance = dArray(x,y,V,nov)
ind_min = np.argmin(distance)
min_dis = distance[ind_min]
return [min_dis,ind_min]
def collision_check_parallel(x,y,obstacle_coordinates,obstacle_radii):
allowable_radii = obstacle_radii*2/np.sqrt(3)
flag = 0 # means no collision
if all(dArray_CC(x,y,obstacle_coordinates,allowable_radii)):
flag = 1
return flag
def draw_circle(xc,yc,r):
t = np.arange(0,2*np.pi,.05)
x = xc+r*np.sin(t)
y = yc+r*np.cos(t)
plt.plot(x,y,c='blue')
#@cuda.jit(device=True)
def main(num_tree):
max_iter = 800
epsilon = 2 # step size
flag = 0 # for finding a connectivity path
# initial and goal points/states
x0 = 10
y0 = 10
x_goal = 90
y_goal = 90
plt.figure(figsize=[10,10])
plt.scatter([x0,x_goal],[y0,y_goal],c='r',marker="P")
# obstacle info
noo = 16 # no. of obstacles
radius = np.sqrt(3)/2*epsilon
obs_radii = radius*np.ones(noo)
obs_coors = 100*np.random.rand(2,noo) # position of obstacles
for i in range(0,noo):
draw_circle(obs_coors[0,i],obs_coors[1,i],obs_radii[i])
if euc_distance_2d(x0,y0,x_goal,y_goal)<epsilon:
flag = 1
plt.plot([x0,x_goal],[y0,y_goal],c='black')
else:
vertices = np.zeros([2,max_iter+1])
A = -np.ones([max_iter+1,max_iter+1])
vertices[0,0] = x0
vertices[1,0] = y0
A[0,0] = 0
nov = 0 # no. of vertices except the initial one
i = 0
while flag==0 and i<max_iter:
i += 1
x_rand= 100*np.random.rand(1)
y_rand= 100*np.random.rand(1)
xy_rand = np.array([x_rand,y_rand]).reshape(2,)
[min_dis,p_near] = nearest_neighbor_2d_parallel(x_rand[0],y_rand[0],vertices,nov+1)
if min_dis<epsilon:
x_new = x_rand
y_new = y_rand
else: # interpolate
r = epsilon/min_dis # ratio
x_new = vertices[0,p_near]+r*(x_rand-vertices[0,p_near])
y_new = vertices[1,p_near]+r*(y_rand-vertices[1,p_near])
if collision_check_parallel(x_new[0],y_new[0],obs_coors,obs_radii):
nov = nov+1
vertices[0,nov] = x_new
vertices[1,nov] = y_new
plt.scatter(x_new,y_new,c='g')
plt.plot([vertices[0,p_near],x_new],[vertices[1,p_near],y_new],c='black')
A[nov,:] = A[p_near,:]
A[nov,nov] = nov
if euc_distance_2d(x_new,y_new,x_goal,y_goal)<epsilon:
nov = nov+1
A[nov,:] = A[nov-1,:]
A[nov,nov] = nov
vertices[0,nov] = x_goal
vertices[1,nov] = y_goal
plt.plot([x_new,x_goal],[y_new,y_goal],c='black')
flag = 1
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.axis('scaled')
if flag ==1 and nov!=0:
B = np.zeros(nov)
nov_path =0 # no. of vertices on the connectivity path
for i in range(0,nov+1):
if A[nov,i]>-1:
B[nov_path]=A[nov,i]
nov_path += 1
B = B[0:nov_path]
for i in range(0, B.size-1):
plt.plot([vertices[0,int(B[i])],vertices[0,int(B[i+1])]],[vertices[1,int(B[i])],vertices[1,int(B[i+1])]],c='yellow',linewidth=7,alpha=0.5)
elif flag ==0:
flag = 0
print('No solution has been found for the given maximum number of iterations.')
else:
print('The initial and goal configurations are close enough.')
plt.plot([x0,x_goal],[y0,y_goal],c='yellow',linewidth=7,alpha=0.5)
plt.savefig("parralel_tree"+str(num_tree+1)+".png")
#plt.show()
#for i in range(10):
# main()
"""
@cuda.jit
def kernel_euler_method(d_diff, d_f, d_stencil):
i= cuda.grid(1)
n= d_diff.shape[0]
d_diff[0] = 1
d_diff[i] = d_f[i] - d_f[i]*(d_f[i]-d_f[i-1])*d_stencil[0]
def wrap_fun_diff(f, dx):
n=f.shape[0]
c=1.0
dt=0.02
stencil = np.array([(dt/dx)])
d_stencil = cuda.to_device(stencil)
d_f = cuda.to_device(f)
d_diff = cuda.device_array_like(d_f)
blocks, threads = (n+TPB-1)//TPB, TPB
kernel_euler_method[blocks, threads](d_diff, d_f, stencil)
return d_diff.copy_to_host()
"""
"""
@cuda.jit
def kernel_para_RRT(d_diff):
i= cuda.blockIdx.x*cuda.blockDim.x + cuda.threadIdx.x
if i < d_diff.shape[0]:
d_diff[i] = 1
#main()
start = cuda.event()
stop = cuda.event()
starts = []
events = 10
for i in range(events):
starts.append(cuda.event())
streams = []
for i in range(events):
n = np.ones(10)
num = 10
#d_f = cuda.to_device(n)
d_diff = cuda.device_array_like(n)
streams.append(cuda.stream)
starts[i].record(streams[-1])
blocks, threads = (num+TPB-1)//TPB, TPB
kernel_para_RRT[blocks, threads, streams[-1]](d_diff)
main()
stop.record()
tot_time = cuda.event_elapsed_time(start, stop)
print(" Time taken for RRT: ", i, "is: ", tot_time,"ms")
#synchronize()
"""
#for s in streams:
# s.synchronize()
#for i in events:
# cuda.event_elapsed_time(start[i], end[i])
#main()
import dask
import timeit
import time
from dask import delayed
main = delayed(main)
for n in range(16):
start = time.time()
_ = main(n).compute()
tot_time = (time.time()-start)
print("Time required for calculating tree : ", n+1, "is: ", tot_time, " sec")
#stream = cuda.stream()
#ccKernel[blocks, threads, stream]
#stream.synchronize()
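# Note / sketch (hedged: the scheduler choice is an assumption): calling
# .compute() inside the loop above evaluates the 16 delayed trees one after
# another. To let Dask actually schedule them concurrently, the tasks can be
# collected first and computed in a single call, e.g.:
#
#   tasks = [main(n) for n in range(16)]
#   dask.compute(*tasks, scheduler="processes")
#
# (scheduler="processes" is assumed here because main() draws through global
# matplotlib state, which is not thread-safe)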
| 30.065844
| 150
| 0.615521
|
0ef26e92f4efb903b5fbd3a001ef20823bbf2ce0
| 1,137
|
py
|
Python
|
svpv/pedigree.py
|
VCCRI/SVPV
|
f902b2b8eea3bae1d4e9b097ea76e08fe41d5400
|
[
"MIT"
] | 30
|
2016-09-16T02:33:20.000Z
|
2021-06-03T01:06:29.000Z
|
svpv/pedigree.py
|
rhshah/SVPV
|
f902b2b8eea3bae1d4e9b097ea76e08fe41d5400
|
[
"MIT"
] | 2
|
2018-05-29T09:18:36.000Z
|
2021-12-17T09:45:25.000Z
|
svpv/pedigree.py
|
rhshah/SVPV
|
f902b2b8eea3bae1d4e9b097ea76e08fe41d5400
|
[
"MIT"
] | 10
|
2017-02-28T06:15:22.000Z
|
2022-02-02T13:45:39.000Z
|
from __future__ import print_function
# read a pedigree and store info in a dictionary
class Pedigree:
def __init__(self, ped, samples):
# dict of list of samples in family
self.samples_by_family = {}
# dict of family by sample
self.families_by_sample = {}
# all info from ped file
self.samples = {}
for line in ped:
if line[0] == '#' or line == '\n':
continue
try:
fam, iid, fid, mid = line.split('\t')[0:4]
except ValueError:
print('Line in pedigree incorrectly formatted:\n"{}"\n'.format(line))
continue
if iid not in samples:
continue
if fam in self.samples_by_family:
self.samples_by_family[fam].append(iid)
else:
self.samples_by_family[fam] = [iid]
self.families_by_sample[iid] = fam
self.samples[iid] = (fam, iid, fid, mid)
for s in samples:
if s not in self.samples:
print('Warning: sample {} not in pedigree\n'.format(s))
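# Minimal usage sketch (hedged: the file name and sample IDs are made up for
# illustration):
#
#   samples = ['NA12878', 'NA12891', 'NA12892']
#   with open('trio.ped') as ped:
#       pedigree = Pedigree(ped, samples)
#   print(pedigree.samples_by_family)              # e.g. {'FAM1': ['NA12878', ...]}
#   print(pedigree.families_by_sample['NA12878'])  # e.g. 'FAM1'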
| 35.53125
| 85
| 0.533861
|
ea5fb5b74d4b1c397f7eb01f77305c582af74312
| 11,630
|
py
|
Python
|
backtesting/agent/market_makers/SpreadBasedMarketMakerAgent.py
|
andrewsonin/abides_dev
|
e8a9c8450bbbe98597f31767362c86eb193597a0
|
[
"BSD-3-Clause"
] | null | null | null |
backtesting/agent/market_makers/SpreadBasedMarketMakerAgent.py
|
andrewsonin/abides_dev
|
e8a9c8450bbbe98597f31767362c86eb193597a0
|
[
"BSD-3-Clause"
] | null | null | null |
backtesting/agent/market_makers/SpreadBasedMarketMakerAgent.py
|
andrewsonin/abides_dev
|
e8a9c8450bbbe98597f31767362c86eb193597a0
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import namedtuple, deque
import pandas as pd
from backtesting.agent.TradingAgent import TradingAgent
from backtesting.utils.util import ignored
from backtesting.utils.util import log_print
ANCHOR_TOP_STR = 'top'
ANCHOR_BOTTOM_STR = 'bottom'
class SpreadBasedMarketMakerAgent(TradingAgent):
""" This class implements the Chakraborty-Kearns `ladder` market-making strategy. """
_Order = namedtuple('_Order', ['price', 'id']) # Internal data structure used to describe a placed order
def __init__(self, id, name, type, symbol, starting_cash, order_size=1, window_size=5, anchor=ANCHOR_BOTTOM_STR,
num_ticks=20, wake_up_freq='1s', subscribe=True, subscribe_freq=10e9, subscribe_num_levels=1,
log_orders=False, random_state=None):
super().__init__(id, name, random_state=random_state, starting_cash=starting_cash, log_orders=log_orders)
self.symbol = symbol # Symbol traded
self.order_size = order_size # order size per price level
self.window_size = window_size # Size in ticks (cents) of how wide the window around mid price is
self.anchor = self.validateAnchor(anchor) # anchor either top of window or bottom of window to mid-price
self.num_ticks = num_ticks # number of ticks on each side of window in which to place liquidity
self.wake_up_freq = wake_up_freq # Frequency of agent wake up
self.subscribe = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
self.subscribe_freq = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
# in subscribe mode
self.subscribe_num_levels = subscribe_num_levels # Number of orderbook levels in subscription mode
self.log_orders = log_orders
## Internal variables
self.subscription_requested = False
self.state = "AWAITING_WAKEUP"
self.current_bids = None # double-ended queue holding bid orders in the book
self.current_asks = None # double-ended queue holding ask orders in the book
self.last_mid = None # last observed mid price
self.order_id_counter = 0 # counter for bookkeeping orders made by self
self.LIQUIDITY_DROPOUT_WARNING = f"Liquidity dropout for agent {self.name}."
def validateAnchor(self, anchor):
""" Checks that input parameter anchor takes allowed value, raises ValueError if not.
:param anchor: str
:return:
"""
if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR]:
raise ValueError(f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}` or `{ANCHOR_TOP_STR}`")
else:
return anchor
def kernelStarting(self, startTime):
super().kernelStarting(startTime)
def wakeup(self, currentTime):
""" Agent wakeup is determined by self.wake_up_freq """
can_trade = super().wakeup(currentTime)
if self.subscribe and not self.subscription_requested:
super().requestDataSubscription(self.symbol, levels=self.subscribe_num_levels, freq=self.subscribe_freq)
self.subscription_requested = True
self.state = 'AWAITING_MARKET_DATA'
elif can_trade and not self.subscribe:
self.getCurrentSpread(self.symbol, depth=self.subscribe_num_levels)
self.state = 'AWAITING_SPREAD'
def receiveMessage(self, currentTime, msg):
""" Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.
        :param currentTime: simulation current time
        :param msg: message received by self from ExchangeAgent
:type currentTime: pd.Timestamp
:type msg: str
:return:
"""
super().receiveMessage(currentTime, msg)
if self.last_mid is not None:
mid = self.last_mid
if not self.subscribe and self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD':
bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
if bid and ask:
mid = int((ask + bid) / 2)
else:
log_print(f"SPREAD MISSING at time {currentTime}")
orders_to_cancel = self.computeOrdersToCancel(mid)
self.cancelOrders(orders_to_cancel)
self.placeOrders(mid)
self.setWakeup(currentTime + self.getWakeFrequency())
self.state = 'AWAITING_WAKEUP'
self.last_mid = mid
elif self.subscribe and self.state == 'AWAITING_MARKET_DATA' and msg.body['msg'] == 'MARKET_DATA':
bid = self.known_bids[self.symbol][0][0] if self.known_bids[self.symbol] else None
ask = self.known_asks[self.symbol][0][0] if self.known_asks[self.symbol] else None
if bid and ask:
mid = int((ask + bid) / 2)
else:
log_print(f"SPREAD MISSING at time {currentTime}")
return
orders_to_cancel = self.computeOrdersToCancel(mid)
self.cancelOrders(orders_to_cancel)
self.placeOrders(mid)
self.state = 'AWAITING_MARKET_DATA'
self.last_mid = mid
def computeOrdersToCancel(self, mid):
""" Given a mid price, computes the orders that need to be removed from orderbook, and pops these orders from
bid and ask deques.
:param mid: mid-price
:type mid: int
:return:
"""
orders_to_cancel = []
if (self.current_asks is None) or (self.current_bids is None):
return orders_to_cancel
num_ticks_to_increase = int(mid - self.last_mid)
if num_ticks_to_increase > 0:
for _ in range(num_ticks_to_increase):
with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
orders_to_cancel.append(self.current_bids.popleft())
with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
orders_to_cancel.append(self.current_asks.popleft())
elif num_ticks_to_increase < 0:
for _ in range(- num_ticks_to_increase):
with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
orders_to_cancel.append(self.current_bids.pop())
with ignored(self.LIQUIDITY_DROPOUT_WARNING, IndexError):
orders_to_cancel.append(self.current_asks.pop())
return orders_to_cancel
def cancelOrders(self, orders_to_cancel):
""" Given a list of _Order objects, remove the corresponding orders from ExchangeAgent's orderbook
:param orders_to_cancel: orders to remove from orderbook
:type orders_to_cancel: list(_Order)
:return:
"""
for order_tuple in orders_to_cancel:
order_id = order_tuple.id
try:
order = self.orders[order_id]
self.cancelOrder(order)
except KeyError:
continue
def computeOrdersToPlace(self, mid):
""" Given a mid price, computes the orders that need to be removed from orderbook, and adds these orders to
bid and ask deques.
:param mid: mid-price
:type mid: int
:return:
"""
bids_to_place = []
asks_to_place = []
if (not self.current_asks) or (not self.current_bids):
self.cancelAllOrders()
self.initialiseBidsAsksDeques(mid)
bids_to_place.extend([order for order in self.current_bids])
asks_to_place.extend([order for order in self.current_asks])
return bids_to_place, asks_to_place
if self.last_mid is not None:
num_ticks_to_increase = int(mid - self.last_mid)
else:
num_ticks_to_increase = 0
if num_ticks_to_increase > 0:
base_bid_price = self.current_bids[-1].price
base_ask_price = self.current_asks[-1].price
for price_increment in range(1, num_ticks_to_increase + 1):
bid_price = base_bid_price + price_increment
new_bid_order = self.generateNewOrderId(bid_price)
bids_to_place.append(new_bid_order)
self.current_bids.append(new_bid_order)
ask_price = base_ask_price + price_increment
new_ask_order = self.generateNewOrderId(ask_price)
asks_to_place.append(new_ask_order)
self.current_asks.append(new_ask_order)
elif num_ticks_to_increase < 0:
base_bid_price = self.current_bids[0].price
base_ask_price = self.current_asks[0].price
for price_increment in range(1, 1 - num_ticks_to_increase):
bid_price = base_bid_price - price_increment
new_bid_order = self.generateNewOrderId(bid_price)
bids_to_place.append(new_bid_order)
self.current_bids.appendleft(new_bid_order)
ask_price = base_ask_price - price_increment
new_ask_order = self.generateNewOrderId(ask_price)
asks_to_place.append(new_ask_order)
self.current_asks.appendleft(new_ask_order)
return bids_to_place, asks_to_place
def placeOrders(self, mid):
""" Given a mid-price, compute new orders that need to be placed, then send the orders to the Exchange.
:param mid: mid-price
:type mid: int
"""
bid_orders, ask_orders = self.computeOrdersToPlace(mid)
for bid_order in bid_orders:
log_print(f'{self.name}: Placing BUY limit order of size {self.order_size} @ price {bid_order.price}')
self.placeLimitOrder(self.symbol, self.order_size, True, bid_order.price, order_id=bid_order.id)
for ask_order in ask_orders:
log_print(f'{self.name}: Placing SELL limit order of size {self.order_size} @ price {ask_order.price}')
self.placeLimitOrder(self.symbol, self.order_size, False, ask_order.price, order_id=ask_order.id)
def initialiseBidsAsksDeques(self, mid):
""" Initialise the current_bids and current_asks object attributes, which internally keep track of the limit
orders sent to the Exchange.
:param mid: mid-price
:type mid: int
"""
if self.anchor == ANCHOR_BOTTOM_STR:
highest_bid = int(mid - 1)
lowest_ask = int(mid + self.window_size)
elif self.anchor == ANCHOR_TOP_STR:
highest_bid = int(mid - self.window_size)
lowest_ask = int(mid + 1)
lowest_bid = highest_bid - self.num_ticks
highest_ask = lowest_ask + self.num_ticks
self.current_bids = deque([self.generateNewOrderId(price) for price in range(lowest_bid, highest_bid + 1)])
self.current_asks = deque([self.generateNewOrderId(price) for price in range(lowest_ask, highest_ask + 1)])
def generateNewOrderId(self, price):
""" Generate a _Order object for a particular price level
:param price:
:type price: int
"""
self.order_id_counter += 1
order_id = f"{self.name}_{self.id}_{self.order_id_counter}"
return self._Order(price, order_id)
def getWakeFrequency(self):
""" Get time increment corresponding to wakeup period. """
return pd.Timedelta(self.wake_up_freq)
def cancelAllOrders(self):
""" Cancels all resting limit orders placed by the market maker """
for _, order in self.orders.items():
self.cancelOrder(order)
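# Worked example of the ladder geometry above (numbers are illustrative only):
# with mid = 100, window_size = 5, num_ticks = 20 and anchor = 'bottom',
# initialiseBidsAsksDeques() places
#   bids at 79..99   (highest_bid = mid - 1 = 99,  lowest_bid  = 99 - 20 = 79)
#   asks at 105..125 (lowest_ask  = mid + 5 = 105, highest_ask = 105 + 20 = 125)
# leaving the 5 price levels 100..104 free of resting orders.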
| 40.950704
| 117
| 0.646862
|
462507b5f5688bff89ba3207f9f9f835fb5f7313
| 9,121
|
py
|
Python
|
onmt/Dataset.py
|
DanSchum/NMTGMinor
|
ebcf33cc77b7c2bc73192f1975b99487db0ebc8a
|
[
"MIT"
] | null | null | null |
onmt/Dataset.py
|
DanSchum/NMTGMinor
|
ebcf33cc77b7c2bc73192f1975b99487db0ebc8a
|
[
"MIT"
] | null | null | null |
onmt/Dataset.py
|
DanSchum/NMTGMinor
|
ebcf33cc77b7c2bc73192f1975b99487db0ebc8a
|
[
"MIT"
] | null | null | null |
from __future__ import division
import math
import torch
import sys
from torch.autograd import Variable
import onmt
class Batch(object):
def __init__(self, src_data, tgt_data=None,
src_align_right=False, tgt_align_right=False):
self.tensors = dict()
self.has_target = False
self.tensors['source'], self.src_lengths = self.join_data(src_data, align_right=src_align_right)
self.tensors['source'] = self.tensors['source'].t().contiguous()
self.tensors['src_attn_mask'] = self.tensors['source'].eq(onmt.Constants.PAD).unsqueeze(1)
self.tensors['src_pad_mask'] = self.tensors['source'].ne(onmt.Constants.PAD)
if tgt_data is not None:
target_full, self.tgt_lengths = self.join_data(tgt_data, align_right=tgt_align_right)
target_full = target_full.t().contiguous()
self.tensors['target_input'] = target_full[:-1]
self.tensors['target_output'] = target_full[1:]
self.tensors['tgt_pad_mask'] = self.tensors['target_input'].ne(onmt.Constants.PAD)
self.tensors['tgt_attn_mask'] = self.tensors['target_input'].ne(onmt.Constants.PAD)
self.tensors['tgt_mask'] = self.tensors['target_output'].ne(onmt.Constants.PAD)
self.has_target = True
self.size = len(src_data)
self.tgt_size = sum([len(x) - 1 for x in tgt_data])
self.src_size = sum([len(x) for x in src_data])
def join_data(self, data, align_right=False):
lengths = [x.size(0) for x in data]
max_length = max(lengths)
# initialize with batch_size * length first
tensor = data[0].new(len(data), max_length).fill_(onmt.Constants.PAD)
for i in range(len(data)):
data_length = data[i].size(0)
offset = max_length - data_length if align_right else 0
tensor[i].narrow(0, offset, data_length).copy_(data[i])
return tensor, lengths
def get(self, name):
if name in self.tensors:
return self.tensors[name]
else:
return None
def cuda(self):
if onmt.Constants.cudaActivated:
for key, value in self.tensors.items():
self.tensors[key] = value.cuda()
class Dataset(object):
'''
    batchSize is now measured in words (tokens) rather than sentences (probably better)
'''
def __init__(self, srcData, tgtData, batchSize, gpus,
data_type="text", balance=False, max_seq_num=128,
multiplier=1, pad_count=False, sort_by_target=False):
self.src = srcData
self._type = data_type
if tgtData:
self.tgt = tgtData
assert(len(self.src) == len(self.tgt))
else:
self.tgt = None
self.cuda = (len(gpus) > 0)
self.fullSize = len(self.src)
self.n_gpu = len(gpus)
self.batchSize = batchSize
self.balance = balance
self.max_seq_num = max_seq_num
# ~ print(self.max_seq_num)
self.multiplier = multiplier
self.sort_by_target = sort_by_target
self.pad_count = pad_count
# if self.balance:
self.allocateBatch()
self.cur_index = 0
self.batchOrder = None
# This function allocates the mini-batches (grouping sentences with the same size)
def allocateBatch(self):
# The sentence pairs are sorted by source already (cool)
self.batches = []
cur_batch = []
cur_batch_size = 0
cur_batch_sizes = []
def oversize_(cur_batch):
#Check if the length of current batch cur_batch is already exceeding the max_seq_num
if len(cur_batch) == self.max_seq_num:
return True
oversized = False
if self.pad_count == False:
if ( cur_batch_size + sentence_length > self.batchSize ):
return True
else:
                # here we assume the new sentence will be included in the minibatch
longest_length = sentence_length
if len(cur_batch_sizes) > 0:
longest_length = max(max(cur_batch_sizes), sentence_length)
if longest_length * (len(cur_batch)+1) > self.batchSize:
return True
return False
i = 0
while i < self.fullSize:
sentence_length = self.tgt[i].size(0) - 1 if self.sort_by_target else self.src[i].size(0)
oversized = oversize_(cur_batch)
# if the current length makes the batch exceeds
# the we create a new batch
if oversized:
current_size = len(cur_batch)
scaled_size = max(
self.multiplier * (current_size // self.multiplier),
current_size % self.multiplier)
# ~ print(cur_batch)
batch_ = cur_batch[:scaled_size]
# ~ print(batch_)
# ~ print(len(batch_))
if self.multiplier > 1:
                    assert len(batch_) % self.multiplier == 0, \
                        "batch size %d is not a multiple of %d" % (len(batch_), self.multiplier)
self.batches.append(batch_) # add this batch into the batch list
cur_batch = cur_batch[scaled_size:] # reset the current batch
cur_batch_sizes = cur_batch_sizes[scaled_size:]
cur_batch_size = sum(cur_batch_sizes)
cur_batch.append(i)
cur_batch_size += sentence_length
cur_batch_sizes.append(sentence_length)
i = i + 1
# catch the last batch
if len(cur_batch) > 0:
self.batches.append(cur_batch)
self.numBatches = len(self.batches)
def _batchify(self, data, align_right=False,
include_lengths=False, dtype="text"):
lengths = [x.size(0) for x in data]
max_length = max(lengths)
out = data[0].new(len(data), max_length).fill_(onmt.Constants.PAD)
for i in range(len(data)):
data_length = data[i].size(0)
offset = max_length - data_length if align_right else 0
out[i].narrow(0, offset, data_length).copy_(data[i])
if include_lengths:
return out, lengths
else:
return out
def __getitem__(self, index):
assert index < self.numBatches, "%d > %d" % (index, self.numBatches)
batch = self.batches[index]
srcData = [self.src[i] for i in batch]
if self.tgt:
tgtData = [self.tgt[i] for i in batch]
else:
tgtData = None
print('Tgt data not found.')
batch = Batch(srcData, tgt_data=tgtData, src_align_right=False, tgt_align_right=False)
return batch
def __len__(self):
return self.numBatches
def create_order(self, random=True):
if random:
self.batchOrder = torch.randperm(self.numBatches)
else:
self.batchOrder = torch.arange(self.numBatches).long()
self.cur_index = 0
return self.batchOrder
def next(self, curriculum=False, reset=True, split_sizes=1):
# reset iterator if reach data size limit
if self.cur_index >= self.numBatches:
if reset:
self.cur_index = 0
else: return None
if curriculum or self.batchOrder is None:
batch_index = self.cur_index
else:
batch_index = self.batchOrder[self.cur_index]
batch = self[batch_index]
# move the iterator one step
self.cur_index += 1
#split that batch to number of gpus
samples = []
split_size = 1
# maybe we need a more smart splitting function ?
# if batch[1] is not None:
# batch_split = zip(batch[0].split(split_size, dim=1),
# batch[1].split(split_size, dim=1))
# batch_split = [ [b[0], b[1]] for i, b in enumerate(batch_split) ]
# else:
# batch_split = zip(batch[0].split(split_size, dim=1))
# batch_split = [ [b[0], None] for i, b in enumerate(batch_split) ]
return [batch]
def shuffle(self):
data = list(zip(self.src, self.tgt))
self.src, self.tgt = zip(*[data[i] for i in torch.randperm(len(data))])
def set_index(self, iteration):
assert iteration >= 0 and iteration < self.numBatches
self.cur_index = iteration
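# Minimal usage sketch (hedged: the tensors and batch size below are made up;
# in NMTGMinor these would normally come from the preprocessed training data):
#
#   src = [torch.LongTensor([4, 5, 6]), torch.LongTensor([7, 8])]
#   tgt = [torch.LongTensor([2, 9, 10, 3]), torch.LongTensor([2, 11, 3])]
#   data = Dataset(src, tgt, batchSize=4096, gpus=[])
#   data.create_order(random=True)
#   batch = data.next()[0]          # a Batch object
#   source = batch.get('source')    # (length x batch) LongTensor, PAD-filled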
| 35.352713
| 135
| 0.546979
|
13a78ee3096804423ed04d3460f60243d070c78c
| 1,329
|
py
|
Python
|
src/profiles/forms.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | null | null | null |
src/profiles/forms.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 9
|
2017-11-21T15:45:18.000Z
|
2022-02-11T03:37:54.000Z
|
src/profiles/forms.py
|
mizamae/HomeAutomation
|
8c462ee4c31c1fea6792cb19af66a4d2cf7bb2ca
|
[
"MIT"
] | 1
|
2020-07-22T02:24:17.000Z
|
2020-07-22T02:24:17.000Z
|
# coding: utf-8
from __future__ import unicode_literals
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from django import forms
from django.contrib.auth import get_user_model
from . import models
User = get_user_model()
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('name'),
)
class Meta:
model = User
fields = ['name']
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Field('picture'),
Field('bio'),
Field('tracking'),
Field('notifications'),
Submit('update', 'Update', css_class="btn-success"),
)
class Meta:
model = models.Profile
fields = ['picture', 'bio','tracking','notifications']
| 27.6875
| 78
| 0.607223
|
cafe76f54799e953f9451465a4d90ffdbf96b08d
| 391
|
py
|
Python
|
bouncer/bouncer/wsgi.py
|
ikechuku/bouncer_rest_api
|
ca3f21a68c445fa3023168d2ecd3b38001554779
|
[
"MIT"
] | null | null | null |
bouncer/bouncer/wsgi.py
|
ikechuku/bouncer_rest_api
|
ca3f21a68c445fa3023168d2ecd3b38001554779
|
[
"MIT"
] | 8
|
2020-06-05T21:35:47.000Z
|
2021-09-22T18:42:45.000Z
|
bouncer/bouncer/wsgi.py
|
ikechuku/demo-jhipster
|
e0d0806ee75dccf77eb62b31c4d59c0bf1a7a230
|
[
"MIT"
] | null | null | null |
"""
WSGI config for bouncer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bouncer.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
138032e30e63fed8d95cf844be0f47c22a9ac7e2
| 1,092
|
py
|
Python
|
docker-image/apiserver/app/app.py
|
wix-playground/supraworker
|
9672a4f0094ef09c51e47ac6e51655ca27a636c0
|
[
"Apache-2.0"
] | 1
|
2020-09-24T00:12:58.000Z
|
2020-09-24T00:12:58.000Z
|
docker-image/apiserver/app/app.py
|
wix-playground/supraworker
|
9672a4f0094ef09c51e47ac6e51655ca27a636c0
|
[
"Apache-2.0"
] | 5
|
2020-04-05T23:31:42.000Z
|
2021-08-31T21:22:49.000Z
|
docker-image/apiserver/app/app.py
|
wix-playground/supraworker
|
9672a4f0094ef09c51e47ac6e51655ca27a636c0
|
[
"Apache-2.0"
] | 2
|
2021-08-03T11:13:43.000Z
|
2021-08-03T11:15:08.000Z
|
# -*- coding: utf-8 -*-
"""
Main file
"""
import os
import logging
from flask import Flask
from flask_restx.apidoc import apidoc
from app.resources.logs import logs_page
from app.resources.jobs import job_page
logging.basicConfig(
level= logging.DEBUG,
format='%(asctime)s {%(filename)s:%(lineno)d} %(levelname)s: %(message)s')
logging.info("Starting Flask...")
logging.info(f"Load FLASK config {os.getenv('APP_SETTINGS', 'flaskconfig.ProductionConfig')}")
app = Flask(__name__)
app.config.from_object(os.getenv('APP_SETTINGS', 'flaskconfig.ProductionConfig'))
logging.info("Register Blueprint")
apidoc.static_url_path = "{}/swagger/ui".format(app.config['URL_PREFIX'])
app.register_blueprint(job_page, url_prefix="{}/jobs".format(app.config['URL_PREFIX']))
app.register_blueprint(logs_page, url_prefix="{}/logs".format(app.config['URL_PREFIX']))
logging.info("FINISHED INITIALIZATION")
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
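# Deployment sketch (hedged: worker count and bind address are arbitrary, and the
# module path 'app.app:app' is only inferred from the imports above): the
# `__name__ != '__main__'` branch is taken when this module is imported by a WSGI
# server rather than run directly, e.g.
#
#   gunicorn --workers 2 --bind 0.0.0.0:8080 'app.app:app'
#
# in which case Flask's logger is re-wired to gunicorn's error logger.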
| 33.090909
| 94
| 0.748168
|
182f3e1b0a6c0654dfba84caf5b80a1be67ff071
| 237
|
py
|
Python
|
practice_python/list_less_then_five/list_less_then_five.py
|
alefeans/python_exercises
|
d7f8ea8dfd993f070d201202d12a526bb8105cc5
|
[
"MIT"
] | 1
|
2021-06-11T18:29:26.000Z
|
2021-06-11T18:29:26.000Z
|
practice_python/list_less_then_five/list_less_then_five.py
|
alefeans/learning_python
|
d7f8ea8dfd993f070d201202d12a526bb8105cc5
|
[
"MIT"
] | null | null | null |
practice_python/list_less_then_five/list_less_then_five.py
|
alefeans/learning_python
|
d7f8ea8dfd993f070d201202d12a526bb8105cc5
|
[
"MIT"
] | null | null | null |
def check_list(lista):
    return [x for x in lista if x < 5]
if __name__ == "__main__":
entry = input("Give me a list of numbers: ")
lista = list(map(int, entry.split()))
print(check_list(lista))
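# Example run (the numbers typed at the prompt are hypothetical):
#
#   Give me a list of numbers: 1 7 3 9 4
#   [1, 3, 4]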
| 26.333333
| 64
| 0.637131
|
74c5418df253777760034d01ee0a0aa455031dd4
| 11,314
|
py
|
Python
|
python/ray/serve/tests/test_backend_worker.py
|
Rid039/ray
|
eb9330df07150adbb9a4e334ace6b1a3ba6602a2
|
[
"Apache-2.0"
] | 1
|
2021-02-04T13:45:00.000Z
|
2021-02-04T13:45:00.000Z
|
python/ray/serve/tests/test_backend_worker.py
|
mfitton/ray
|
fece8db70d703da1aad192178bd50923e83cc99a
|
[
"Apache-2.0"
] | 72
|
2021-02-06T08:07:16.000Z
|
2022-03-26T07:17:49.000Z
|
python/ray/serve/tests/test_backend_worker.py
|
mfitton/ray
|
fece8db70d703da1aad192178bd50923e83cc99a
|
[
"Apache-2.0"
] | 2
|
2020-05-22T15:36:27.000Z
|
2020-05-22T15:52:03.000Z
|
import asyncio
import pytest
import numpy as np
import ray
from ray import serve
from ray.serve.backend_worker import create_backend_replica, wrap_to_ray_error
from ray.serve.controller import TrafficPolicy
from ray.serve.router import Router, RequestMetadata
from ray.serve.config import BackendConfig, BackendMetadata
from ray.serve.exceptions import RayServeException
from ray.serve.utils import get_random_letters
pytestmark = pytest.mark.asyncio
def setup_worker(name,
func_or_class,
init_args=None,
backend_config=BackendConfig(),
controller_name=""):
if init_args is None:
init_args = ()
@ray.remote
class WorkerActor:
def __init__(self):
self.worker = create_backend_replica(func_or_class)(
name, name + ":tag", init_args, backend_config,
controller_name)
def ready(self):
pass
@ray.method(num_returns=2)
async def handle_request(self, *args, **kwargs):
return await self.worker.handle_request(*args, **kwargs)
def update_config(self, new_config):
return self.worker.update_config(new_config)
async def drain_pending_queries(self):
return await self.worker.drain_pending_queries()
worker = WorkerActor.remote()
ray.get(worker.ready.remote())
return worker
async def add_servable_to_router(servable, router, controller_name, **kwargs):
worker = setup_worker(
"backend", servable, controller_name=controller_name, **kwargs)
await router._update_replica_handles.remote({"backend": [worker]})
await router._update_traffic_policies.remote({
"endpoint": TrafficPolicy({
"backend": 1.0
})
})
if "backend_config" in kwargs:
await router._update_backend_configs.remote({
"backend": kwargs["backend_config"]
})
return worker
def make_request_param(call_method="__call__"):
return RequestMetadata(
get_random_letters(10), "endpoint", call_method=call_method)
@pytest.fixture
async def router(serve_instance):
q = ray.remote(Router).remote(serve_instance._controller)
yield q
ray.kill(q)
async def test_runner_wraps_error():
wrapped = wrap_to_ray_error(Exception())
assert isinstance(wrapped, ray.exceptions.RayTaskError)
async def test_servable_function(serve_instance, router,
mock_controller_with_name):
def echo(request):
return request.query_params["i"]
await add_servable_to_router(echo, router, mock_controller_with_name[0])
for query in [333, 444, 555]:
query_param = make_request_param()
result = await (await router.assign_request.remote(
query_param, i=query))
assert result == query
async def test_servable_class(serve_instance, router,
mock_controller_with_name):
class MyAdder:
def __init__(self, inc):
self.increment = inc
def __call__(self, request):
return request.query_params["i"] + self.increment
await add_servable_to_router(
MyAdder, router, mock_controller_with_name[0], init_args=(3, ))
for query in [333, 444, 555]:
query_param = make_request_param()
result = await (await router.assign_request.remote(
query_param, i=query))
assert result == query + 3
async def test_task_runner_custom_method_single(serve_instance, router,
mock_controller_with_name):
class NonBatcher:
def a(self, _):
return "a"
def b(self, _):
return "b"
await add_servable_to_router(NonBatcher, router,
mock_controller_with_name[0])
query_param = make_request_param("a")
a_result = await (await router.assign_request.remote(query_param))
assert a_result == "a"
query_param = make_request_param("b")
b_result = await (await router.assign_request.remote(query_param))
assert b_result == "b"
query_param = make_request_param("non_exist")
with pytest.raises(ray.exceptions.RayTaskError):
await (await router.assign_request.remote(query_param))
async def test_task_runner_custom_method_batch(serve_instance, router,
mock_controller_with_name):
@serve.accept_batch
class Batcher:
def a(self, requests):
return ["a-{}".format(i) for i in range(len(requests))]
def b(self, requests):
return ["b-{}".format(i) for i in range(len(requests))]
backend_config = BackendConfig(
max_batch_size=4,
batch_wait_timeout=10,
internal_metadata=BackendMetadata(accepts_batches=True))
await add_servable_to_router(
Batcher,
router,
mock_controller_with_name[0],
backend_config=backend_config)
a_query_param = make_request_param("a")
b_query_param = make_request_param("b")
futures = [
await router.assign_request.remote(a_query_param) for _ in range(2)
]
futures += [
await router.assign_request.remote(b_query_param) for _ in range(2)
]
gathered = await asyncio.gather(*futures)
assert set(gathered) == {"a-0", "a-1", "b-0", "b-1"}
async def test_servable_batch_error(serve_instance, router,
mock_controller_with_name):
@serve.accept_batch
class ErrorBatcher:
def error_different_size(self, requests):
return [""] * (len(requests) + 10)
def error_non_iterable(self, _):
return 42
def return_np_array(self, requests):
return np.array([1] * len(requests)).astype(np.int32)
backend_config = BackendConfig(
max_batch_size=4,
internal_metadata=BackendMetadata(accepts_batches=True))
await add_servable_to_router(
ErrorBatcher,
router,
mock_controller_with_name[0],
backend_config=backend_config)
with pytest.raises(RayServeException, match="doesn't preserve batch size"):
different_size = make_request_param("error_different_size")
await (await router.assign_request.remote(different_size))
with pytest.raises(RayServeException, match="iterable"):
non_iterable = make_request_param("error_non_iterable")
await (await router.assign_request.remote(non_iterable))
np_array = make_request_param("return_np_array")
result_np_value = await (await router.assign_request.remote(np_array))
assert isinstance(result_np_value, np.int32)
async def test_task_runner_perform_batch(serve_instance, router,
mock_controller_with_name):
def batcher(requests):
batch_size = len(requests)
return [batch_size] * batch_size
config = BackendConfig(
max_batch_size=2,
batch_wait_timeout=10,
internal_metadata=BackendMetadata(accepts_batches=True))
await add_servable_to_router(
batcher, router, mock_controller_with_name[0], backend_config=config)
query_param = make_request_param()
my_batch_sizes = await asyncio.gather(*[(
await router.assign_request.remote(query_param)) for _ in range(3)])
assert my_batch_sizes == [2, 2, 1]
async def test_task_runner_perform_async(serve_instance, router,
mock_controller_with_name):
@ray.remote
class Barrier:
def __init__(self, release_on):
self.release_on = release_on
self.current_waiters = 0
self.event = asyncio.Event()
async def wait(self):
self.current_waiters += 1
if self.current_waiters == self.release_on:
self.event.set()
else:
await self.event.wait()
barrier = Barrier.remote(release_on=10)
async def wait_and_go(*args, **kwargs):
await barrier.wait.remote()
return "done!"
config = BackendConfig(
max_concurrent_queries=10,
internal_metadata=BackendMetadata(is_blocking=False))
await add_servable_to_router(
wait_and_go,
router,
mock_controller_with_name[0],
backend_config=config)
query_param = make_request_param()
done, not_done = await asyncio.wait(
[(await router.assign_request.remote(query_param)) for _ in range(10)],
timeout=10)
assert len(done) == 10
for item in done:
assert await item == "done!"
async def test_user_config_update(serve_instance, router,
mock_controller_with_name):
class Customizable:
def __init__(self):
self.reval = ""
def __call__(self, starlette_request):
return self.retval
def reconfigure(self, config):
self.retval = config["return_val"]
config = BackendConfig(
num_replicas=2, user_config={
"return_val": "original",
"b": 2
})
await add_servable_to_router(
Customizable,
router,
mock_controller_with_name[0],
backend_config=config)
query_param = make_request_param()
done = [(await router.assign_request.remote(query_param))
for _ in range(10)]
for i in done:
assert await i == "original"
config = BackendConfig()
config.user_config = {"return_val": "new_val"}
await mock_controller_with_name[1].update_backend.remote("backend", config)
done = [(await router.assign_request.remote(query_param))
for _ in range(10)]
for i in done:
assert await i == "new_val"
async def test_graceful_shutdown(serve_instance, router,
mock_controller_with_name):
class KeepInflight:
def __init__(self):
self.events = []
def reconfigure(self, config):
if config["release"]:
[event.set() for event in self.events]
async def __call__(self, _):
e = asyncio.Event()
self.events.append(e)
await e.wait()
backend_worker = await add_servable_to_router(
KeepInflight,
router,
mock_controller_with_name[0],
backend_config=BackendConfig(
num_replicas=1,
internal_metadata=BackendMetadata(is_blocking=False),
user_config={"release": False}))
query_param = make_request_param()
refs = [(await router.assign_request.remote(query_param))
for _ in range(6)]
shutdown_ref = backend_worker.drain_pending_queries.remote()
with pytest.raises(ray.exceptions.GetTimeoutError):
# Shutdown should block because there are still inflight queries.
ray.get(shutdown_ref, timeout=2)
config = BackendConfig()
config.user_config = {"release": True}
await mock_controller_with_name[1].update_backend.remote("backend", config)
# All queries should complete successfully
ray.get(refs)
# The draining operation should be completed.
ray.get(shutdown_ref)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| 31.168044
| 79
| 0.646721
|
0d691ec72cea914e2e76e9db7179d1a70b269fcf
| 2,954
|
py
|
Python
|
start.py
|
joezawisa/dockermc
|
d4e622ba9dc23fee613e7b14154780e66df9cc00
|
[
"Apache-2.0"
] | null | null | null |
start.py
|
joezawisa/dockermc
|
d4e622ba9dc23fee613e7b14154780e66df9cc00
|
[
"Apache-2.0"
] | null | null | null |
start.py
|
joezawisa/dockermc
|
d4e622ba9dc23fee613e7b14154780e66df9cc00
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import requests
import os
import sys
# Function to load version manifest
def get_manifest(url):
r = requests.get(url)
if r.ok:
return r.json()
else:
print('Error: failed to load manifest', file=sys.stderr)
sys.exit(1)
# Parse command line arguments
parser = argparse.ArgumentParser(description='Start a minecraft server in Docker.')
parser.add_argument('-v', '--version', metavar='version', type=str, help='minecraft version', required=False, default='latest')
parser.add_argument('-m', '--manifest', metavar='manifest', type=str, help='version manifest', required=False, default='https://launchermeta.mojang.com/mc/game/version_manifest_v2.json')
arguments = vars(parser.parse_args())
# Create required directories if they don't already exist
for directory in ['versions', 'data']:
if not os.path.exists(directory):
os.mkdir(directory)
version = arguments['version']
manifest = None
# If the user chose to run the latest version, find out what version that is
if version == 'latest':
# Get version manifest
manifest = get_manifest(arguments['manifest'])
# Get the version number of the latest release from the manifest
try:
version = manifest['latest']['release']
except KeyError:
print('Error: failed to find latest release in manifest', file=sys.stderr)
sys.exit(1)
# Check whether we already have the requested version
if not os.path.exists(f'versions/{version}.jar'):
if manifest is None:
manifest = get_manifest(arguments['manifest'])
# If it's missing, find it in the version manifest
server = None
try:
for v in manifest['versions']:
if v['id'] == version:
# Get version info
r = requests.get(v['url'])
if r.ok:
metadata = r.json()
server = metadata['downloads']['server']['url']
else:
print('Error: failed to load version info', file=sys.stderr)
sys.exit(1)
break
except KeyError:
print('Error: failed to find server URL', file=sys.stderr)
sys.exit(1)
# Download the server (we do this in chunks since it might be big)
if server is None:
print('Error: Requested version not found', file=sys.stderr)
sys.exit(1)
else:
with requests.get(server, stream=True) as r:
if r.ok:
with open(f'versions/{version}.jar', 'wb') as jar:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
jar.write(chunk)
else:
print('Error: failed to download server', file=sys.stderr)
sys.exit(1)
# Start the server
os.chdir('./data')
os.execvp(file='java', args=['java', '-Xmx1024M', '-Xms1024M', '-jar', f'../versions/{version}.jar', 'nogui'])
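# Usage sketch (hedged: the version number is only an example; 'latest' is the
# default when --version is omitted):
#
#   python start.py --version 1.18.2
#   python start.py                  # resolves and runs the latest release
#
# Either form downloads the matching server jar into ./versions/ on first use,
# then execs `java -jar ../versions/<version>.jar nogui` from ./data/.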
| 36.925
| 186
| 0.611713
|
0e83acad0e58eff2facc4247cbcec272ebd3b2a5
| 1,705
|
py
|
Python
|
cacreader/swig-4.0.2/Examples/python/import/runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 1,031
|
2015-01-02T14:08:47.000Z
|
2022-03-29T02:25:27.000Z
|
cacreader/swig-4.0.2/Examples/python/import/runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 240
|
2015-01-11T04:27:19.000Z
|
2022-03-30T00:35:57.000Z
|
cacreader/swig-4.0.2/Examples/python/import/runme.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | 224
|
2015-01-05T06:13:54.000Z
|
2022-02-25T14:39:51.000Z
|
# file: runme.py
# Test various properties of classes defined in separate modules
print "Testing the %import directive"
import base
import foo
import bar
import spam
# Create some objects
print "Creating some objects"
a = base.Base()
b = foo.Foo()
c = bar.Bar()
d = spam.Spam()
# Try calling some methods
print "Testing some methods"
print "",
print "Should see 'Base::A' ---> ",
a.A()
print "Should see 'Base::B' ---> ",
a.B()
print "Should see 'Foo::A' ---> ",
b.A()
print "Should see 'Foo::B' ---> ",
b.B()
print "Should see 'Bar::A' ---> ",
c.A()
print "Should see 'Bar::B' ---> ",
c.B()
print "Should see 'Spam::A' ---> ",
d.A()
print "Should see 'Spam::B' ---> ",
d.B()
# Try some casts
print "\nTesting some casts\n"
print "",
x = a.toBase()
print "Should see 'Base::A' ---> ",
x.A()
print "Should see 'Base::B' ---> ",
x.B()
x = b.toBase()
print "Should see 'Foo::A' ---> ",
x.A()
print "Should see 'Base::B' ---> ",
x.B()
x = c.toBase()
print "Should see 'Bar::A' ---> ",
x.A()
print "Should see 'Base::B' ---> ",
x.B()
x = d.toBase()
print "Should see 'Spam::A' ---> ",
x.A()
print "Should see 'Base::B' ---> ",
x.B()
x = d.toBar()
print "Should see 'Bar::B' ---> ",
x.B()
print "\nTesting some dynamic casts\n"
x = d.toBase()
print " Spam -> Base -> Foo : ",
y = foo.Foo_fromBase(x)
if y:
print "bad swig"
else:
print "good swig"
print " Spam -> Base -> Bar : ",
y = bar.Bar_fromBase(x)
if y:
print "good swig"
else:
print "bad swig"
print " Spam -> Base -> Spam : ",
y = spam.Spam_fromBase(x)
if y:
print "good swig"
else:
print "bad swig"
print " Foo -> Spam : ",
y = spam.Spam_fromBase(b)
if y:
print "bad swig"
else:
print "good swig"
| 15.787037
| 64
| 0.57654
|
d18bcf0ffa5e179f264dac8fd02a77b6934e007c
| 8,349
|
py
|
Python
|
component/tile/bfast_tile.py
|
sepal-contrib/bfast_gpu
|
22214b0277993835f41f07e6646e6e15deecc9d9
|
[
"MIT"
] | 2
|
2021-05-21T06:23:20.000Z
|
2021-06-29T06:03:25.000Z
|
component/tile/bfast_tile.py
|
sepal-contrib/bfast_gpu
|
22214b0277993835f41f07e6646e6e15deecc9d9
|
[
"MIT"
] | 28
|
2021-04-23T09:38:36.000Z
|
2021-09-28T09:44:00.000Z
|
component/tile/bfast_tile.py
|
sepal-contrib/bfast_gpu
|
22214b0277993835f41f07e6646e6e15deecc9d9
|
[
"MIT"
] | 2
|
2021-05-01T09:35:59.000Z
|
2021-06-22T10:53:33.000Z
|
from pathlib import Path
from datetime import datetime as dt
import ipyvuetify as v
from sepal_ui import sepalwidgets as sw
from sepal_ui.scripts import utils as su
from traitlets import Any
from component import widget as cw
from component.message import cm
from component import scripts as cs
from component import parameter as cp
class BfastTile(sw.Tile):
dir_ = Any(None).tag(sync=True)
def __init__(self):
# create the different widgets
# I will not use Io as the information doesn't need to be communicated to any other tile
self.folder = cw.FolderSelect()
self.out_dir = cw.OutDirSelect()
self.tiles = cw.TilesSelect()
self.poly = v.Select(
label=cm.widget.harmonic.label, v_model=3, items=[i for i in range(3, 11)]
)
self.freq = v.Slider(
label=cm.widget.freq.label,
v_model=12,
min=1,
max=12,
thumb_label="always",
class_="mt-5",
)
self.trend = v.Switch(v_model=False, label=cm.widget.trend.label)
self.hfrac = v.Select(
label=cm.widget.hfrac.label, v_model=0.25, items=[0.25, 0.5, 1.0]
)
self.level = v.Slider(
label=cm.widget.level.label,
v_model=0.95,
step=0.001,
min=0.95,
max=1,
thumb_label="always",
class_="mt-5",
)
self.backend = cw.BackendSelect()
self.monitoring = cw.DateRangeSlider(label=cm.widget.monitoring.label)
self.history = cw.DateSlider(label=cm.widget.history.label)
# stack the advance parameters in a expandpanel
advance_params = v.ExpansionPanels(
class_="mb-5",
popout=True,
children=[
v.ExpansionPanel(
children=[
v.ExpansionPanelHeader(children=[cm.widget.advance_params]),
v.ExpansionPanelContent(
children=[
v.Flex(xs12=True, children=[self.freq]),
v.Flex(xs12=True, children=[self.hfrac]),
v.Flex(xs12=True, children=[self.level]),
v.Flex(xs12=True, children=[self.backend]),
]
),
]
)
],
)
# create the tile
super().__init__(
"bfast_tile",
cm.bfast.folder, # the title is used to describe the first section
inputs=[
self.folder,
self.out_dir,
self.tiles,
v.Html(tag="h2", children=[cm.bfast.process]),
self.poly,
self.trend,
advance_params,
v.Html(tag="h2", children=[cm.bfast.periods]),
self.history,
self.monitoring,
],
alert=cw.CustomAlert(),
btn=sw.Btn(cm.bfast.btn),
)
# add js behaviour
self.folder.observe(self._on_folder_change, "v_model")
self.btn.on_event("click", self._start_process)
self.monitoring.observe(self._check_periods, "v_model")
self.history.observe(self._check_periods, "v_model")
@su.loading_button(debug=True)
def _start_process(self, widget, event, data):
"""start the bfast process"""
# gather all the variables for conveniency
folder = self.folder.v_model
out_dir = self.out_dir.v_model
tiles = self.tiles.v_model
poly = self.poly.v_model
freq = min(self.freq.v_model * 31, 365)
trend = self.trend.v_model
hfrac = self.hfrac.v_model
level = self.level.v_model
backend = self.backend.v_model
monitoring = self.monitoring.v_model
history = self.history.v_model
# check the inputs
if not self.alert.check_input(folder, cm.widget.folder.no_folder):
return
if not self.alert.check_input(out_dir, cm.widget.out_dir.no_dir):
return
if not self.alert.check_input(tiles, cm.widget.tiles.no_tiles):
return
if not self.alert.check_input(poly, cm.widget.harmonic.no_poly):
return
if not self.alert.check_input(freq, cm.widget.freq.no_freq):
return
if not self.alert.check_input(trend, cm.widget.trend.no_trend):
return
if not self.alert.check_input(hfrac, cm.widget.hfrac.no_frac):
return
if not self.alert.check_input(level, cm.widget.level.no_level):
return
if not self.alert.check_input(backend, cm.widget.backend.no_backend):
return
if not self.alert.check_input(len(monitoring), cm.widget.monitoring.no_dates):
return
if not self.alert.check_input(history, cm.widget.history.no_date):
return
# check the dates
if not (history < monitoring[0] < monitoring[1]):
return self.alert.add_msg(cm.widget.monitoring.bad_order, "error")
# run the bfast process
res = cs.run_bfast(
Path(folder),
out_dir,
tiles,
monitoring,
history,
freq,
poly,
hfrac,
trend,
level,
backend,
self.alert,
)
print(type(res))
print(res)
self.dir_ = res
# display the end of computation message
self.alert.add_live_msg(
cm.bfast.complete.format(str(cp.result_dir / out_dir)), "success"
)
def _on_folder_change(self, change):
"""
Change the available tiles according to the selected folder
Raise an error if the folder is not structured as a SEPAL time series (i.e. folder number for each tile)
"""
# get the new selected folder
folder = Path(change["new"])
# reset the widgets
self.out_dir.v_model = None
self.tiles.reset()
# check if it's a time series folder
if not self.folder.is_valid_ts():
# reset the non working inputs
self.monitoring.disable()
self.history.disable()
self.tiles.reset()
self.dates_0 = None
# display a message to the end user
self.alert.add_msg(cm.widget.folder.no_ts.format(folder), "warning")
return self
# set the basename
self.out_dir.set_folder(folder)
# set the items in the dropdown
self.tiles.set_items(folder)
# set the dates for the sliders
# we consider that the dates are consistent through all the folders so we can use only the first one
with (folder / "0" / "dates.csv").open() as f:
dates = sorted(
[
dt.strptime(l, "%Y-%m-%d")
for l in f.read().splitlines()
if l.rstrip()
]
)
self.monitoring.set_dates(dates)
self.history.set_dates(dates)
self.alert.add_msg(cm.widget.folder.valid_ts.format(folder))
return self
def _check_periods(self, change):
"""check if the historical period have enough images"""
# to avoid bug on disable
if not (self.history.dates and self.monitoring.dates):
return self
# get the dates from the folder
folder = Path(self.folder.v_model)
with (folder / "0" / "dates.csv").open() as f:
dates = sorted(
[
dt.strptime(l, "%Y-%m-%d")
for l in f.read().splitlines()
if l.rstrip()
]
)
# get the value of the current history and monitoring dates
history = next(d[0] for d in enumerate(dates) if d[1] > self.history.v_model)
monitor = next(
d[0] for d in enumerate(dates) if d[1] > self.monitoring.v_model[0]
)
if history > (monitor - cp.min_images):
self.alert.add_msg(cm.widget.history.too_short, "warning")
else:
self.alert.reset()
return self
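# Worked example for _check_periods above (all numbers are hypothetical):
# if the selected history date falls at index 5 of the dates list, the start of
# the monitoring range at index 20, and cp.min_images is 40, then
# 5 > (20 - 40) is True, so the "history period too short" warning is shown;
# with cp.min_images = 10 the check 5 > (20 - 10) is False and the alert is
# reset instead.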
| 33.262948
| 112
| 0.548329
|
3e3b076c7313d33e4b59b5611b24294163ec667b
| 4,145
|
py
|
Python
|
tgbot/ext/persistent/sqlite.py
|
JacoRuit/tgbot
|
1e32a802bc4869d46562ec4c2d46dc691868061c
|
[
"MIT"
] | 2
|
2016-08-09T21:28:04.000Z
|
2016-08-16T12:04:40.000Z
|
tgbot/ext/persistent/sqlite.py
|
JacoRuit/tgbot
|
1e32a802bc4869d46562ec4c2d46dc691868061c
|
[
"MIT"
] | null | null | null |
tgbot/ext/persistent/sqlite.py
|
JacoRuit/tgbot
|
1e32a802bc4869d46562ec4c2d46dc691868061c
|
[
"MIT"
] | null | null | null |
import threading
import sqlite3
import json
from tgbot.ext.persistent import Storage, PersistentChat, PersistentUser
class ThreadsafeCursor(object):
def __init__(self, connection):
self._mutex = threading.Lock()
self._connection = connection
self._cursor = connection.cursor()
def _inside(self):
if self._mutex.acquire(False):
self._mutex.release()
return False
else:
return True
def __enter__(self):
self._mutex.acquire()
def __exit__(self, type, value, traceback):
self._connection.commit()
self._mutex.release()
def __getattr__(self, key):
attr = getattr(self._cursor, key)
if hasattr(attr, "__call__"):
def wrap(*args, **kwargs):
if not self._inside():
raise RuntimeError("Can only call cursor functions inside with block")
return attr(*args, **kwargs)
return wrap
else:
return attr
class SQLiteStorage(Storage):
def __init__(self, database_path):
self.database_path = database_path
self.connection = None
self.cursor = None
def create_tables_ifnexists(self):
with self.cursor:
self.cursor.execute("create table if not exists chats (id text unique, type text, title text, username text, first_name text, last_name text, data text)")
self.cursor.execute("create table if not exists users (id text unique, first_name text, last_name text, username text, data text)")
def load_chats(self, bot):
with self.cursor:
for (id, type, title, username, first_name, last_name, data_json) in self.cursor.execute("select * from chats"):
try: id = int(id) # Try to convert back to int (will fail for channels)
except: pass
bot.add_chat(id, {
"type": type, "username": username, "first_name": first_name, "last_name": last_name # Data as sent by Telegram API
})
try:
bot.chats[id].data = json.loads(data_json)
except: pass
def load_users(self, bot):
with self.cursor:
for (id, first_name, last_name, username, data_json) in self.cursor.execute("select * from users"):
try: id = int(id) # Try to convert back to int (will fail for channels)
except: pass
bot.add_user(id, {
"first_name": first_name, "last_name": last_name, "username": username # Data as sent by Telegram API
})
try:
bot.users[id].data = json.loads(data_json)
except: pass
def sql_value(self, var):
return str(var) if var is not None else "NULL"
def save_chats(self, bot):
with self.cursor:
for chat in bot.chats.values():
self.cursor.execute("insert or ignore into chats(id) values (?)", [self.sql_value(chat.id)])
self.cursor.execute(
"update chats set type = ?, title = ?, username = ?, first_name = ?, last_name = ?, data = ? WHERE id = ?",
[self.sql_value(x) for x in [chat.type, chat.title, chat.username, chat.first_name, chat.last_name, json.dumps(chat.data), chat.id]])
def save_users(self, bot):
with self.cursor:
for user in bot.users.values():
self.cursor.execute("insert or ignore into users(id) values (?)", [self.sql_value(user.id)])
self.cursor.execute(
"update users set first_name = ?, last_name = ?, username = ?, data = ? WHERE id = ?",
[self.sql_value(x) for x in [user.first_name, user.last_name, user.username, json.dumps(user.data), user.id]])
def open(self):
self.connection = sqlite3.connect(self.database_path)
self.cursor = ThreadsafeCursor(self.connection)
self.create_tables_ifnexists()
def close(self):
with self.cursor:
self.cursor.close()
self.connection.close()
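# Minimal usage sketch (hedged: `bot` stands for a tgbot Bot instance with the
# chats/users dictionaries this Storage expects; the database path is made up):
#
#   storage = SQLiteStorage("bot_state.db")
#   storage.open()
#   storage.load_chats(bot)
#   storage.load_users(bot)
#   ...                       # run the bot
#   storage.save_chats(bot)
#   storage.save_users(bot)
#   storage.close()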
| 40.637255
| 166
| 0.586972
|
2c86f1741a61ee77235397dc66c5de34a8463caa
| 2,651
|
py
|
Python
|
Wager/migrations/0001_initial.py
|
tfleech/Wager
|
b6dc658db063fce7d76ab39619fb8ce2ee83981a
|
[
"MIT"
] | null | null | null |
Wager/migrations/0001_initial.py
|
tfleech/Wager
|
b6dc658db063fce7d76ab39619fb8ce2ee83981a
|
[
"MIT"
] | null | null | null |
Wager/migrations/0001_initial.py
|
tfleech/Wager
|
b6dc658db063fce7d76ab39619fb8ce2ee83981a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-16 04:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Terms', models.CharField(max_length=200)),
('Stakes', models.CharField(max_length=300)),
('Due_date', models.DateTimeField(default=datetime.datetime(2017, 3, 17, 4, 5, 34, 64194, tzinfo=utc))),
('Status', models.IntegerField()),
('Public', models.BooleanField()),
('Date_created', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user1_id', models.IntegerField()),
('user2_id', models.IntegerField()),
('status', models.IntegerField()),
('action_user_id', models.IntegerField()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('User_Name', models.CharField(max_length=50)),
('Password', models.CharField(max_length=50)),
('Date_joined', models.DateTimeField()),
('Wins', models.IntegerField(default=0)),
('Losses', models.IntegerField(default=0)),
('WinPercent', models.IntegerField(default=0)),
('LossPercent', models.IntegerField(default=0)),
],
),
migrations.AlterUniqueTogether(
name='relationship',
unique_together=set([('user1_id', 'user2_id')]),
),
migrations.AddField(
model_name='bet',
name='user1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user1', to='Wager.User'),
),
migrations.AddField(
model_name='bet',
name='user2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user2', to='Wager.User'),
),
]
| 38.42029
| 120
| 0.563184
|
b278c7bd58db1982fd34b24d77b49b41b336f81f
| 6,763
|
py
|
Python
|
src/generate_snapshot_data/dump.py
|
saheel1115/szz-
|
dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3
|
[
"MIT"
] | 9
|
2017-10-21T13:29:46.000Z
|
2022-01-10T23:49:54.000Z
|
src/generate_snapshot_data/dump.py
|
saheel1115/szz-
|
dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3
|
[
"MIT"
] | 3
|
2018-01-09T11:28:55.000Z
|
2019-01-20T08:45:18.000Z
|
src/generate_snapshot_data/dump.py
|
saheel1115/szz-
|
dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3
|
[
"MIT"
] | 1
|
2020-12-29T05:10:31.000Z
|
2020-12-29T05:10:31.000Z
|
#!/usr/bin/python
import argparse
import os, sys, inspect
import os.path
import shutil
import logging
from git import Repo
from projDB import DbProj
#from GitRepo import GitRepo
sys.path.append("src/util")
sys.path.append("src/changes")
from Config import Config
from OutDir import OutDir
import Log
from Util import cd
import Util
from datetime import datetime
from datetime import timedelta
import ntpath, pickle
#--------------------------------------------------------------------------------------------------------------------------
def pathLeaf(path):
"""
Returns the basename of the file/directory path in an _extremely_ robust way.
    For example, pathLeaf('/home/saheel/git_repos/szz/abc.c/') will return 'abc.c'.
Args
----
path: string
Path to some file or directory in the system
Returns
-------
string
Basename of the file or directory
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
#--------------------------------------------------------------------------------------------------------------------------
def getProjName(projectPath):
proj_path = projectPath.rstrip(os.sep)
project_name = proj_path.split(os.sep)[-1]
return project_name
#--------------------------------------------------------------------------------------------------------------------------
def dumpSnapShots(srcPath, destPath, ss_interval_len, commitDateMin, commitDateMax):
print srcPath, destPath, commitDateMin, commitDateMax
repo = Repo(srcPath)
branch = repo.active_branch
print branch
project_name = getProjName(srcPath)
start_date = commitDateMin + timedelta(days=1)
while start_date <= commitDateMax:
#snapshot = destPath + os.sep + project_name + os.sep + project_name + "_" + str(start_date)
snapshot = destPath + os.sep + project_name + os.sep + str(start_date)
print snapshot
if not os.path.isdir(snapshot):
Util.copy_dir(srcPath,snapshot)
git_command = "git checkout `git rev-list -n 1 --no-merges --before=\"" + str(start_date) + "\" " + str(branch) + "`"
with cd(snapshot):
os.system("git reset --hard")
#os.system("git checkout")
os.system(git_command)
start_date = start_date + timedelta(days=ss_interval_len*30)
#snapshot = destPath + os.sep + project_name + os.sep + project_name + "_" + str(commitDateMax)
start_date = commitDateMax
snapshot = destPath + os.sep + project_name + os.sep + str(start_date)
print snapshot
if not os.path.isdir(snapshot):
Util.copy_dir(srcPath,snapshot)
git_command = "git checkout `git rev-list -n 1 --no-merges --before=\"" + str(start_date) + "\" " + str(branch) + "`"
with cd(snapshot):
os.system("git reset --hard")
#os.system("git checkout")
os.system(git_command)
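# --- Editor's note (sketch, not part of the original file) ---------------------
# The shell pipeline above ("git checkout `git rev-list -n 1 --no-merges
# --before=... <branch>`") can also be expressed through GitPython's command
# wrapper, avoiding os.system and quoting issues. A minimal sketch, assuming the
# GitPython import above is available; the helper name is illustrative:
def checkout_before(snapshot_path, branch, cutoff_date):
    """Check out the last non-merge commit on `branch` made before `cutoff_date`."""
    snap_repo = Repo(snapshot_path)
    sha = snap_repo.git.rev_list('-n', '1', '--no-merges',
                                 '--before=%s' % str(cutoff_date), str(branch))
    snap_repo.git.reset('--hard')
    if sha:
        snap_repo.git.checkout(sha)
    return sha
# -------------------------------------------------------------------------------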
#--------------------------------------------------------------------------------------------------------------------------
def fetchCommitDates(cfg, projectPath, language):
db_config = cfg.ConfigSectionMap("Database")
logging.debug("Database configuration = %r\n", db_config)
proj_path = projectPath.rstrip(os.sep)
project_name = proj_path.split(os.sep)[-1]
logging.debug("project = %r\n", project_name)
proj = DbProj(project_name, language)
proj.connectDb(db_config['database'], db_config['user'], db_config['host'], db_config['port'])
proj.fetchDatesFromTable(db_config['table'])
logging.debug(proj)
print proj
print proj.projects
assert(len(proj.projects) == 1)
_ , _ , commit_date_min, commit_date_max = proj.projects[0]
return (commit_date_min, commit_date_max)
#=============================================================================================
#=============================================================================================
# Utility to take snapshots of git repositories at 6 months interval
# 1. First, retrieve the 1st commit date from SQL server
# 2. Copy the projects to directories: project_date1, project_date2, .... in each 6 months interval
# 3. For each copy checkout the dump upto that date
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Utility to take snapshots of git repositories at specified (in months) interval')
#project specific arguments
parser.add_argument('-p',dest="proj_dir", help="the directory containing original src code")
parser.add_argument('-d',dest="out_dir", default='out_dir', help="directories to dump the snapshots")
parser.add_argument('-l',dest="lang", default='java', help="languages to be processed")
parser.add_argument('-m',dest="ss_interval_len", default='6', help="duration of interval (in months) between two snapshots")
#logging and config specific arguments
parser.add_argument("-v", "--verbose", default = 'w', nargs="?", \
help="increase verbosity: d = debug, i = info, w = warnings, e = error, c = critical. " \
"By default, we will log everything above warnings.")
parser.add_argument("--log", dest="log_file", default='log.txt', \
help="file to store the logs, by default it will be stored at log.txt")
parser.add_argument("--conf", dest="config_file", default='config.ini', help="configuration file, default is config.ini")
args = parser.parse_args()
if not os.path.isdir(args.proj_dir):
print "!! Please provide a valid directory, given: %s" % (args.proj_dir)
sys.exit()
print "Going to take snapshot for project %s" % (args.proj_dir)
print "Creating output directory at %s" % (args.out_dir)
Util.cleanup(args.log_file)
Log.setLogger(args.verbose, args.log_file)
cfg = Config(args.config_file)
#1. First, retrieve the 1st commit date from SQL server
commit_dates = fetchCommitDates(cfg, args.proj_dir, args.lang)
#2. Snapshot
dumpSnapShots(args.proj_dir, args.out_dir, int(args.ss_interval_len), commit_dates[0], commit_dates[1])
project_name = pathLeaf(args.proj_dir)
ss_dir = os.path.abspath(args.out_dir)
ss_names = os.listdir(ss_dir + '/' + project_name)
ss_names.sort()
ss_paths = [ss_dir + '/' + project_name + '/' + ss_name + '/' for ss_name in ss_names]
ss_name_to_sha = {}
for ss_index, ss_path in enumerate(ss_paths):
repo = Repo(ss_path)
ss_sha = repo.git.log('--format=%H', '-n', '1')
ss_name_to_sha[ss_names[ss_index]] = ss_sha
with open(ss_dir + '/' + project_name + '/ss_sha_info.txt', 'wb') as out_file:
pickle.dump(ss_name_to_sha, out_file)
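# --- Editor's note (sketch, not part of the original file) ---------------------
# The pickled snapshot-name -> commit-SHA map written above can later be read
# back like this (path shown is the same one used above):
#
#     import pickle
#     with open(ss_dir + '/' + project_name + '/ss_sha_info.txt', 'rb') as in_file:
#         ss_name_to_sha = pickle.load(in_file)
# -------------------------------------------------------------------------------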
| 34.505102
| 131
| 0.600917
|
c6d081bc4881fee1a25eca9f6fed0c003a7b8ecd
| 3,869
|
py
|
Python
|
examples/python/copula.py
|
HarryPetr1969/statsmodels
|
fb448540aa0fa354d4cd2fe7b161cb949ce91888
|
[
"BSD-3-Clause"
] | 6,931
|
2015-01-01T11:41:55.000Z
|
2022-03-31T17:03:24.000Z
|
examples/python/copula.py
|
HarryPetr1969/statsmodels
|
fb448540aa0fa354d4cd2fe7b161cb949ce91888
|
[
"BSD-3-Clause"
] | 6,137
|
2015-01-01T00:33:45.000Z
|
2022-03-31T22:53:17.000Z
|
examples/python/copula.py
|
HarryPetr1969/statsmodels
|
fb448540aa0fa354d4cd2fe7b161cb949ce91888
|
[
"BSD-3-Clause"
] | 2,608
|
2015-01-02T21:32:31.000Z
|
2022-03-31T07:38:30.000Z
|
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook copula.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Copula - Multivariate joint distribution
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats
sns.set_style("darkgrid")
sns.mpl.rc("figure", figsize=(8, 8))
# When modeling a system, there are often cases where multiple parameters
# are involved. Each of these parameters could be described with a given
# Probability Density Function (PDF). If we would like to be able to generate
# a new set of parameter values, we need to be able to sample from these
# distributions, also called marginals. There are mainly two cases: *(i)* the
# PDFs are independent; *(ii)* there is a dependency. One way to model the
# dependency is to use a **copula**.
# ## Sampling from a copula
#
# Let's use a bi-variate example and assume first that we have a prior and
# know how to model the dependence between our 2 variables.
#
# In this case, we are using the Gumbel copula and fix its hyperparameter
# `theta=2`. We can visualize its 2-dimensional PDF.
from statsmodels.distributions.copula.api import CopulaDistribution, GumbelCopula
copula = GumbelCopula(theta=2)
_ = copula.plot_pdf() # returns a matplotlib figure
# And we can sample the PDF.
sample = copula.rvs(10000)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="hex")
_ = h.set_axis_labels("X1", "X2", fontsize=16)
# Let's come back to our 2 variables for a second. In this case we
# consider them to be gamma and normally distributed. If they were
# independent of each other, we could sample from each PDF individually.
# Here we use a convenient class to do the same operation.
#
# ### Reproducibility
#
# Generating reproducible random values from copulas requires explicitly
# setting the `seed` argument.
# `seed` accepts either an initialized NumPy `Generator` or `RandomState`,
# or any argument acceptable
# to `np.random.default_rng`, e.g., an integer or a sequence of integers.
# This example uses an
# integer.
#
# The singleton `RandomState` that is directly exposed in the `np.random`
# distributions is
# not used, and setting `np.random.seed` has no effect on the values
# generated.
marginals = [stats.gamma(2), stats.norm]
joint_dist = CopulaDistribution(marginals=marginals, copula=None)
sample = joint_dist.rvs(512, random_state=20210801)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="scatter")
_ = h.set_axis_labels("X1", "X2", fontsize=16)
# Now that we have expressed the dependency between our variables using
# a copula, we can use this copula to sample a new set of observations with
# the same convenient class.
joint_dist = CopulaDistribution(marginals=marginals, copula=copula)
# Use an initialized Generator object
rng = np.random.default_rng([2, 0, 2, 1, 0, 8, 0, 1])
sample = joint_dist.rvs(512, random_state=rng)
h = sns.jointplot(x=sample[:, 0], y=sample[:, 1], kind="scatter")
_ = h.set_axis_labels("X1", "X2", fontsize=16)
# There are two things to note here. *(i)* as in the independent case, the
# marginals are correctly showing a gamma and normal distribution; *(ii)*
# the dependence is visible between the two variables.
# ## Estimating copula parameters
#
# Now, imagine we already have experimental data and we know that there is
# a dependency that can be expressed using a Gumbel copula. But we don't
# know the value of the copula's hyperparameter. In this case, we can
# estimate the value.
#
# We are going to use the sample we just generated as we already know the
# value of the hyperparameter we should get: `theta=2`.
copula = GumbelCopula()
theta = copula.fit_corr_param(sample)
print(theta)
# We can see that the estimated hyperparameter value is close to the value
# set previously.
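# --- Editor's note (sketch, not part of the generated notebook output) ---------
# Closing the loop: the fitted `theta` can be plugged back into a new copula and
# joint distribution to draw fresh samples. This uses only classes and variables
# already defined above and is shown purely as an illustrative sketch.
fitted_copula = GumbelCopula(theta=theta)
fitted_joint = CopulaDistribution(marginals=marginals, copula=fitted_copula)
new_sample = fitted_joint.rvs(512, random_state=20210801)
print(new_sample.shape)  # (512, 2): one column per marginal
# -------------------------------------------------------------------------------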
| 36.847619
| 81
| 0.745929
|
475cceeef187ebb47ef961786a0cf55184a33c66
| 2,885
|
py
|
Python
|
spotty/providers/aws/deployment/project_resources/instance_stack.py
|
Inculus/spotty
|
56863012668a6c13ad13c2a04f900047e229fbe6
|
[
"MIT"
] | 1
|
2020-07-17T07:02:09.000Z
|
2020-07-17T07:02:09.000Z
|
spotty/providers/aws/deployment/project_resources/instance_stack.py
|
Inculus/spotty
|
56863012668a6c13ad13c2a04f900047e229fbe6
|
[
"MIT"
] | null | null | null |
spotty/providers/aws/deployment/project_resources/instance_stack.py
|
Inculus/spotty
|
56863012668a6c13ad13c2a04f900047e229fbe6
|
[
"MIT"
] | null | null | null |
import boto3
from spotty.commands.writers.abstract_output_writrer import AbstractOutputWriter
from spotty.providers.aws.aws_resources.instance import Instance
from spotty.providers.aws.aws_resources.stack import Stack
class InstanceStackResource(object):
def __init__(self, project_name: str, instance_name: str, region: str):
self._cf = boto3.client('cloudformation', region_name=region)
self._ec2 = boto3.client('ec2', region_name=region)
self._region = region
self._stack_name = 'spotty-instance-%s-%s' % (project_name.lower(), instance_name.lower())
@property
def name(self):
return self._stack_name
def get_instance(self):
return Instance.get_by_stack_name(self._ec2, self.name)
def create_or_update_stack(self, template: str, parameters: dict, output: AbstractOutputWriter):
"""Runs CloudFormation template."""
# delete the stack if it exists
stack = Stack.get_by_name(self._cf, self._stack_name)
if stack:
self.delete_stack(output)
# create new stack
stack = Stack.create_stack(
cf=self._cf,
StackName=self._stack_name,
TemplateBody=template,
Parameters=[{'ParameterKey': key, 'ParameterValue': value} for key, value in parameters.items()],
Capabilities=['CAPABILITY_IAM'],
OnFailure='DO_NOTHING',
)
output.write('Waiting for the stack to be created...')
resource_messages = [
('Instance', 'launching the instance'),
('DockerReadyWaitCondition', 'waiting for the Docker container to be ready'),
]
# wait for the stack to be created
with output.prefix(' '):
stack = stack.wait_status_changed(waiting_status='CREATE_IN_PROGRESS',
resource_messages=resource_messages,
resource_success_status='CREATE_COMPLETE', output=output)
if stack.status != 'CREATE_COMPLETE':
raise ValueError('Stack "%s" was not created.\n'
                             'Please see the CloudFormation logs for details.'
% self._stack_name)
return stack
def delete_stack(self, output: AbstractOutputWriter, no_wait=False):
stack = Stack.get_by_name(self._cf, self._stack_name)
if not stack:
return
if not no_wait:
output.write('Waiting for the stack to be deleted...')
# delete the stack
try:
stack.delete()
if not no_wait:
stack.wait_stack_deleted()
except Exception as e:
raise ValueError('Stack "%s" was not deleted. Error: %s\n'
'See CloudFormation logs for details.' % (self._stack_name, str(e)))
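# --- Editor's usage sketch (not part of the original module) -------------------
# A minimal, hypothetical example of driving InstanceStackResource. `PrintWriter`
# below is a stand-in for a concrete AbstractOutputWriter from spotty (only the
# `write()` and `prefix()` calls used above are implemented); the project name,
# region, template and parameter values in the commented call are placeholders.
from contextlib import contextmanager


class PrintWriter(object):  # hypothetical stand-in, not spotty's real writer class
    def write(self, text=''):
        print(text)

    @contextmanager
    def prefix(self, prefix):
        yield  # a real writer would indent subsequent output with `prefix`

# stack_res = InstanceStackResource('my-project', 'gpu-1', 'us-east-1')
# stack_res.create_or_update_stack(template=cfn_template_body,
#                                  parameters={'InstanceType': 'p2.xlarge'},
#                                  output=PrintWriter())
# -------------------------------------------------------------------------------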
| 37.960526
| 109
| 0.612478
|
2dc0f5fd32bbadaee8f799098aaeede3a0a6a025
| 31,546
|
py
|
Python
|
test_ghost.py
|
JIABI/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 2
|
2021-08-23T08:43:35.000Z
|
2021-11-28T17:22:29.000Z
|
test_ghost.py
|
selkerdawy/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 1
|
2021-11-01T08:35:07.000Z
|
2021-11-01T08:35:07.000Z
|
test_ghost.py
|
selkerdawy/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 3
|
2021-11-10T08:37:50.000Z
|
2022-02-08T13:28:16.000Z
|
import argparse
import os, time
import torch
import shutil
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
from pthflops import count_ops
import torch.optim as optim
from thop import profile
from thop import clever_format
from torch.autograd import Variable
import models
import optim
import torch.backends.cudnn as cudnn
from cyclicLR import CyclicCosAnnealingLR
from torchsummary import summary
from deepshift.convert import convert_to_shift, round_shift_weights, count_layer_type
import distutils.util
import matplotlib.pyplot as plt
import time
from pathlib import Path
from adder import Adder2D
from models import adder as adder_slow
from adder import adder as adder_fast
import deepshift
from models import resnet20_shiftadd_ghost
from models import mobilenet_shiftadd_ghost
from models import shufflenet_shiftadd
from models import shufflenet_shiftadd_ghost
from models import shufflenet_shiftadd_ghost_cpu
from se_shift.utils_optim import SGD
from se_shift import SEConv2d, SELinear
from se_shift.utils_quantize import sparsify_and_nearestpow2
from se_shift.utils_swa import bn_update, moving_average
from collections import OrderedDict
import pytorch_model_summary as pms
from torchvision.models import shufflenet_v2_x0_5
from models import wideres
CUDA_VISIBLE_DEVICES = 0  # note: a plain Python variable has no effect; set os.environ["CUDA_VISIBLE_DEVICES"] to actually restrict GPUs
import summary_model
# Training settings
parser = argparse.ArgumentParser(description='PyTorch AdderNet Training')
parser.add_argument('--data', type=str, default='D:/datasets/imagenet-mini', help='path to imagenet')
parser.add_argument('--dataset', type=str, default='cifar10', help='training dataset')
parser.add_argument('--data_path', type=str, default='/home/bella/Desktop/ShiftAddNet', help='path to dataset')
parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size for training')
parser.add_argument('--test_batch_size', type=int, default=64, metavar='N', help='batch size for testing')
parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train')
parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='restart point')
parser.add_argument('--schedule', type=int, nargs='+', default=[80, 120], help='learning rate schedule')
parser.add_argument('--lr', type=float, default=0.20, metavar='LR', help='learning rate')
parser.add_argument('--lr-sign', default=None, type=float, help='separate initial learning rate for sign params')
parser.add_argument('--lr_decay', default='stepwise', type=str, choices=['stepwise', 'cosine', 'cyclic_cosine'])
parser.add_argument('--optimizer', type=str, default='sgd', help='used optimizer')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum')
parser.add_argument('--weight_decay', '--wd', default=1e-5, type=float, metavar='W', help='weight decay')
parser.add_argument('--resume', default='./temp/shift_ps_40_wb_5_add-None/mobilenetv2_ghost_100%_2x.pth.tar', type=str, metavar='PATH', help='path to latest checkpoint')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed')
parser.add_argument('--save', default='./temp', type=str, metavar='PATH', help='path to save prune model')
parser.add_argument('--arch', default='mobilenet_shiftadd_ghost', type=str, help='architecture to use')
parser.add_argument('--no-cuda', action='store_true', default=True, help='disables CUDA training')
parser.add_argument('--log_interval', type=int, default=100, metavar='N', help='how many batches to wait before logging training status')
# multi-gpus
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
# shift hyper-parameters
parser.add_argument('--shift_depth', type=int, default=40, help='how many layers to convert to shift')
parser.add_argument('--shift_type', type=str, choices=['Q', 'PS'], default='PS', help='shift type for representing weights')
parser.add_argument('--rounding', default='deterministic', choices=['deterministic', 'stochastic'])
parser.add_argument('--weight_bits', type=int, default=5, help='number of bits to represent the shift weights')
parser.add_argument('--sign_threshold_ps', type=float, default=None, help='can be controlled')
parser.add_argument('--use_kernel', type=lambda x: bool(distutils.util.strtobool(x)), default=False, help='whether using custom shift kernel')
# add hyper-parameters
parser.add_argument('--add_quant', type=bool, default=True, help='whether to quantize adder layer')
parser.add_argument('--add_bits', type=int, help='number of bits to represent the adder filters')
parser.add_argument('--add_sparsity', type=float, default=None, help='sparsity in adder filters')
parser.add_argument('--quantize_v', type=str, default='sbm', help='quantize version')
# shift hyper-parameters
parser.add_argument('--shift_quant_bits', type=int, default=32, help='quantization training for shift layer')
#parser.add_argument('--sign_threshold', type=float, default=0, help='Threshold for pruning.')
#parser.add_argument('--distributed', action='store_true', help='whether to use distributed training')
# distributed parallel
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--port", type=str, default="15000")
parser.add_argument('--distributed', action='store_true', help='whether to use distributed training')
# eval only
parser.add_argument('--eval_only', action='store_true', default=True, help='whether only evaluation')
parser.add_argument('--l1', action='store_true', default=True, help='whether sparse shift l1 norm')
# sparse
parser.add_argument('--threshold', type=float, default=1 * 1e-4, # (>= 2^-7)
help='Threshold in prune weight.')
parser.add_argument('--sign_threshold', type=float, default=0.1, help='Threshold for pruning.')
parser.add_argument('--dist', type=str, default='uniform', choices=['kaiming_normal', 'normal', 'uniform'])
parser.add_argument('--percent', default=0, type=float, help='percentage of weight to prune')
parser.add_argument('--prune_method', default='magnitude', choices=['random', 'magnitude'])
parser.add_argument('--prune_layer', default='add', choices=['shift', 'add', 'all'])
model1 = shufflenet_v2_x0_5()
input = torch.randn(1, 3, 224, 224)
pms.summary(model1, torch.zeros((1, 3, 224, 224)), batch_size=1, show_hierarchical=False, print_summary=True)
macs, params = profile(model1, inputs=(input, ))
macs, params = clever_format([macs, params], "%.3f")
print(macs)
print(params)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if not os.path.exists(args.save):
os.makedirs(args.save)
cudnn.benchmark = True
gpu = args.gpu_ids
gpu_ids = args.gpu_ids.split(',')
args.gpu_ids = []
for gpu_id in gpu_ids:
id = int(gpu_id)
args.gpu_ids.append(id)
#print(args.gpu_ids)
#if len(args.gpu_ids) > 0:
# torch.cuda.set_device(args.gpu_ids[0])
if args.distributed:
os.environ['MASTER_PORT'] = args.port
torch.distributed.init_process_group(backend="nccl")
kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}
if args.dataset == 'cifar10':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10/cifar-10-batches-py', train=True, download=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(size=32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data.cifar10/cifar-10-batches-py', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
elif args.dataset == 'cifar100':
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
elif args.dataset == 'mnist':
trainset = datasets.MNIST('../MNIST', download=True, train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]
)
)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)
testset = datasets.MNIST('../MNIST', download=True, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]
)
)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=True, num_workers=4)
else:
# Data loading code
DATA = Path("D:/datasets/imagenet-mini")
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.Resize(64),
transforms.RandomResizedCrop(64),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor(),
normalize,
])
train_dataset = datasets.ImageFolder(DATA / 'train', transform_train)
test_dataset = datasets.ImageFolder(DATA / 'val', transform_test)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=6, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=args.test_batch_size, shuffle=False,
pin_memory=True, num_workers=6)
#traindir = os.path.join(args.data, 'train')
#valdir = os.path.join(args.data, 'val')
#train_dataset = datasets.ImageFolder(
# traindir,
# transforms.Compose([
# transforms.RandomResizedCrop(64),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
# ]))
#test_loader = torch.utils.data.DataLoader(
# datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Resize(64),
# transforms.CenterCrop(64),
# transforms.ToTensor(),
# normalize,
# ])),
# batch_size=args.test_batch_size, shuffle=False,
# num_workers=16, pin_memory=True)
if args.dataset == 'imagenet':
num_classes = 1000
model = models.__dict__['resnet50'](num_classes=1000, quantize=args.add_quant, weight_bits=args.add_bits)
elif args.dataset == 'cifar10':
num_classes = 10
# model = models.shufflenet_shiftadd.ghostnet(num_classes=num_classes, pretrained=False)
#model = shufflenet_v2_x0_5()
#model = models.shufflenet_shiftadd_ghost.ghostnet(num_classes=num_classes, pretrained=False)
#model = models.resnet_backbone.resnet18(num_classes=10)
model = models.wideres.Wide_ResNet(28, 10, 0.3, 10)
#model = models.vgg_shiftadd.vgg16_nd_ss()
#model = models.resnet20_shiftadd_ghost.resnet20_shift(num_classes=num_classes, quantize=args.add_quant, weight_bits=args.add_bits, quantize_v=args.quantize_v)
# model = models.mobilenet_shiftadd_ghost.ghostnet(num_classes=num_classes, kernel_size=3, quantize=args.add_quant, weight_bits=args.add_bits, quantize_v=args.quantize_v)
#model = models.resnet20_shiftadd_ghost.resnet20_shiftadd_ghost(num_classes=num_classes, quantize=args.add_quant, weight_bits=args.add_bits, quantize_v=args.quantize_v)
#model = models.__dict__[args.arch](num_classes=num_classes, quantize=args.add_quant, weight_bits=args.add_bits, quantize_v=args.quantize_v)
# model = models.__dict__[args.arch](threshold = args.threshold, sign_threshold = args.sign_threshold, distribution = args.dist, num_classes=10, quantize=args.add_quant, weight_bits=args.add_bits)
elif args.dataset == 'cifar100':
num_classes = 100
model = models.__dict__[args.arch](num_classes=100, quantize=args.add_quant, weight_bits=args.add_bits, quantize_v=args.quantize_v)
elif args.dataset == 'mnist':
model = models.__dict__[args.arch](num_classes=10, quantize=args.add_quant, weight_bits=args.add_bits)
else:
raise NotImplementedError('No such dataset!')
#print(model)
#M = []
#N = []
#K = []
#S = []
#C = []
#size = 32
#for m in model.modules():
# if isinstance(m, nn.Conv2d):
# M.append(m.weight.shape[0])
# N.append(m.weight.shape[1])
# K.append(m.weight.shape[2])
# S.append(m.stride[0])
# C.append(int(size))
# if S[-1] == 2:
# size /= 2
#print('M', M)
#print('N', N)
#print('K', K)
#print('S', S)
#print('C', C)
#print(len(M))
#for i in range(len(M)):
# print('const int M{} = {}, N{} = {}, K{} = {}, S{} = {}, C{} = {};'.format(
# i, M[i], i, N[i], i, K[i], i, S[i], i, C[i]))
# print('const int H{} = C{} - S{} + K{};'.format(i, i, i, i))
#exit()
#best_prec1 = None
#shift_depth = []
#if best_prec1 is None: # no pretrain
# if 'shift' in args.arch:
# model, conversion_count = convert_to_shift(model, args.shift_depth, args.shift_type, convert_weights=False, use_kernel=args.use_kernel, rounding=args.rounding,
# weight_bits=args.weight_bits, sign_threshold_ps=args.sign_threshold_ps, quant_bits=args.shift_quant_bits)
#else:
# if 'shift' in args.arch:
# model, conversion_count = convert_to_shift(model, shift_depth, args.shift_type, convert_weights=False,
# use_kernel=args.use_kernel, rounding=args.rounding,
# weight_bits=args.weight_bits,
# sign_threshold_ps=args.sign_threshold_ps,
# quant_bits=args.shift_quant_bits)
if args.cuda:
model.cuda()
if len(args.gpu_ids) > 1:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=args.gpu_ids)
else:
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
# create optimizer
model_other_params = []
model_sign_params = []
model_shift_params = []
for name, param in model.named_parameters():
if(name.endswith(".sign")):
model_sign_params.append(param)
elif(name.endswith(".shift")):
model_shift_params.append(param)
else:
model_other_params.append(param)
params_dict = [
{"params": model_other_params},
{"params": model_sign_params, 'lr': args.lr_sign if args.lr_sign is not None else args.lr, 'weight_decay': 0},
{"params": model_shift_params, 'lr': args.lr, 'weight_decay': 0}
]
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
optimizer = None
if (args.optimizer.lower() == "sgd"):
optimizer = torch.optim.SGD(params_dict, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "adadelta"):
optimizer = torch.optim.Adadelta(params_dict, args.lr, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "adagrad"):
optimizer = torch.optim.Adagrad(params_dict, args.lr, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "adam"):
optimizer = torch.optim.Adam(params_dict, args.lr, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "rmsprop"):
optimizer = torch.optim.RMSprop(params_dict, args.lr, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "radam"):
optimizer = optim.RAdam(params_dict, args.lr, weight_decay=args.weight_decay)
elif (args.optimizer.lower() == "ranger"):
optimizer = optim.Ranger(params_dict, args.lr, weight_decay=args.weight_decay)
else:
raise ValueError("Optimizer type: ", args.optimizer, " is not supported or known")
schedule_cosine_lr_decay = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0, last_epoch=-1)
scheduler_cyclic_cosine_lr_decay = CyclicCosAnnealingLR(optimizer, milestones=[40,60,80,100,140,180,200,240,280,300,340,400], decay_milestones=[100, 200, 300, 400], eta_min=0)
def save_checkpoint(state, is_best, epoch, filepath):
if epoch == 'init':
filepath = os.path.join(filepath, 'init.pth.tar')
torch.save(state, filepath)
else:
# filename = os.path.join(filepath, 'ckpt'+str(epoch)+'.pth.tar')
# torch.save(state, filename)
filename = os.path.join(filepath, 'ckpt.pth.tar')
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(filepath, 'shufflenet_ghost2sa3.pth.tar'))
def load_add_state_dict(state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'weight' in k and not 'bn' in k and not 'fc' in k:
if k == 'conv1.weight' or 'downsample.1' in k:
new_state_dict[k] = v
continue
k = k[:-6] + 'adder'
# print(k)
new_state_dict[k] = v
return new_state_dict
def load_shiftadd_state_dict(state_dict):
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'weight' in k and not 'bn' in k and not 'fc' in k:
if k == 'conv1.weight' or 'downsample.2' in k:
new_state_dict[k] = v
continue
k = k[:-6] + 'adder'
# print(k)
new_state_dict[k] = v
return new_state_dict
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location='cpu')
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
try:
try:
model.load_state_dict(checkpoint['state_dict'],strict=False)
except:
model.load_state_dict(load_add_state_dict(checkpoint['state_dict']),strict=False)
except:
model.load_state_dict(load_shiftadd_state_dict(checkpoint['state_dict']),strict=False)
if not args.eval_only:
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}"
.format(args.resume, checkpoint['epoch'], best_prec1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
save_checkpoint({'state_dict': model.state_dict()}, False, epoch='init', filepath=args.save)
#inp = torch.rand(1,3,224,224).to(device)
#count_ops(model, inp)
#print(count_ops())
#exit()
# print("WARNING: The summary function reports duplicate parameters for multi-GPU case")
#except:
# print("WARNING: Unable to obtain summary of model")
# save name
# name model sub-directory "shift_all" if all layers are converted to shift layers
conv2d_layers_count = count_layer_type(model, nn.Conv2d) #+ count_layer_type(model, unoptimized.UnoptimizedConv2d)
linear_layers_count = count_layer_type(model, nn.Linear) #+ count_layer_type(model, unoptimized.UnoptimizedLinear)
#print(conv2d_layers_count)
if (args.shift_depth > 0):
if (args.shift_type == 'Q'):
shift_label = "shift_q"
else:
shift_label = "shift_ps"
else:
shift_label = "shift"
# if (conv2d_layers_count==0 and linear_layers_count==0):
if conv2d_layers_count == 0:
shift_label += "_all"
else:
shift_label += "_%s" % (args.shift_depth)
if (args.shift_depth > 0):
shift_label += "_wb_%s" % (args.weight_bits)
if args.add_quant:
shift_label += '_add-{}'.format(args.add_bits)
if args.sign_threshold_ps:
shift_label += '_ps_thre-{}'.format(args.sign_threshold_ps)
args.save = os.path.join(args.save, shift_label)
if not os.path.exists(args.save):
os.makedirs(args.save)
history_score = np.zeros((args.epochs, 7))
#history_score1 = np.zeros((args.epochs, 1))
def visualize(feat, labels, epoch):
plt.ion()
c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
'#ff00ff', '#990000', '#999900', '#009900', '#009999']
plt.clf()
for i in range(10):
plt.plot(feat[labels == i, 0], feat[labels == i, 1], '.', c=c[i])
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc = 'upper right')
# plt.xlim(xmin=-8,xmax=8)
# plt.ylim(ymin=-8,ymax=8)
# plt.text(-7.8,7.3,"epoch=%d" % epoch)
plt.title("epoch=%d" % epoch)
vis_dir = os.path.join(args.save, 'visualization')
if not os.path.exists(vis_dir):
os.makedirs(vis_dir)
plt.savefig(vis_dir+'/epoch=%d.jpg' % epoch)
plt.draw()
plt.pause(0.001)
def accuracy(output, target, topk=(1, 5)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
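# --- Editor's note (sketch, not part of the original script) -------------------
# Example of what accuracy() computes, on a random batch (illustrative only):
#
#     logits = torch.randn(8, 10)             # 8 samples, 10 classes
#     labels = torch.randint(0, 10, (8,))
#     top1, top5 = accuracy(logits, labels, topk=(1, 5))
#     # top1/top5 are the percentages of samples whose true class falls within
#     # the top-1 / top-5 predicted classes.
# -------------------------------------------------------------------------------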
#if 'shift' in args.arch:
if args.shift_type == 'Q':
shift_module = deepshift.modules_q.Conv2dShiftQ
elif args.shift_type == 'PS':
shift_module = deepshift.modules.Conv2dShift
else:
raise NotImplementedError
global total_add
total_add = 0
for m in model.modules():
if isinstance(m, Adder2D):
total_add += m.adder.data.numel()
global total
total = 0
def get_shift_range(model):
if 'shift' in args.arch:
# pruning
if args.shift_type == 'Q':
total = 0
for m in model.modules():
if isinstance(m, shift_module):
total += m.weight.data.numel()
shift_weights = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, shift_module):
size = m.weight.data.numel()
shift_weights[index:(index+size)] = m.weight.data.view(-1).abs().clone()
#print(shift_weights)
index += size
y, i = torch.sort(shift_weights)
thre_index = int(total * percent)
thre = y[thre_index] - 1e-7
weight_unique = torch.unique(shift_weights)
#print(weight_unique)
print('shift_range:', weight_unique.size()[0]-1)
elif args.shift_type == 'PS':
total = 0
for m in model.modules():
if isinstance(m, shift_module):
total += m.sign.data.numel()
sign_weights = torch.zeros(total)
shift_weights = torch.zeros(total)
index = 0
for m in model.modules():
if isinstance(m, shift_module):
size = m.sign.data.numel()
sign_weights[index:(index+size)] = m.sign.data.view(-1).abs().clone()
shift_weights[index:(index+size)] = m.shift.data.view(-1).abs().clone()
index += size
y, i = torch.sort(shift_weights)
print('y is:', len(y))
print('i is:', len(i))
shift_unique = torch.unique(shift_weights)
print('shift range:', shift_unique.size()[0]-1)
left_shift_weight = int(torch.sum(shift_weights != 0))
left_shift_mask = int(torch.sum(sign_weights != 0))
# print('pruning ratio:', (1 - left_shift_mask / float(total)) * 100, '%')
print('left mask:', left_shift_mask)
print('left weights:', left_shift_weight)
print('total shift:', total)
history_score[epoch][5] = left_shift_mask
def get_adder_sparsity(model):
#if args.add_sparsity == 0:
# print('no sparisty in adder layer.')
if 'add' in args.arch:
adder_masks = torch.zeros(total_add)
index = 0
for m in model.modules():
if isinstance(m, Adder2D):
size = m.adder.data.numel()
adder_masks[index:(index+size)] = m.adder.data.view(-1).abs().clone()
index += size
left_adder_mask = int(torch.sum(adder_masks != 0))
print('left adder mask', left_adder_mask)
# print('Add sparsity ratio:', (1 - left_adder_mask / float(total_add)) * 100, '%')
print('total adders:', total_add)
history_score[epoch][6] = left_adder_mask
from deepshift import utils
def build(self):
for name, self in model.named_modules():
if isinstance(self, deepshift.modules.Conv2dShift):
self.shift_grad = torch.zeros_like(self.shift.data)
self.shift_mean_grad = torch.zeros_like(self.shift.data)
self.sign_grad = torch.zeros_like(self.shift.data)
self.sign_mean_grad = torch.zeros_like(self.shift.data)
self.shift_sum = torch.zeros_like(self.shift.data)
self.shift_mask = torch.zeros_like(self.shift.data)
if isinstance(self, Adder2D):
self.adder_grad = torch.zeros_like(self.adder.data)
self.adder_mean_grad = torch.zeros_like(self.adder.data)
self.adder_mask = torch.zeros_like(self.adder.data)
self.adder_sum = torch.zeros_like(self.adder.data)
def create_mask(shape, rate):
mask = torch.cuda.FloatTensor(shape).uniform_() > rate
return mask + 0
def train(epoch):
model.train()
global history_score
avg_loss = 0.
train_acc = 0.
pruned = 0.
end_time = time.time()
feat_loader = []
idx_loader = []
# batch_time = time.time()
start_time = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
# print('total time for one batch: {}'.format(time.time()-batch_time))
# batch_time = time.time()
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if args.cuda:
data, target = data.cuda(), target.cuda()
# with torch.no_grad():
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
avg_loss += loss.item()
prec1, prec5 = accuracy(output.data, target.data, topk=(1, 5))
train_acc += prec1.item()
loss.backward()
#for name, m in model.named_modules():
# if isinstance(m, deepshift.modules.Conv2dShift):
# print(m.shift.data)
# sign = m.sign.data
# sign[sign < -0.2] = -1
# sign[sign > 0.2] = 1
# sign[(-0.2 <= sign) & (sign <= 0.2)] = 0
optimizer.step()
#for name, m in model.named_modules():
# if isinstance(m, deepshift.modules.Conv2dShift):
# m.shift.data = m.shift.data - 0.0008**(epoch+1)*torch.norm(m.shift.data).float().cuda()
# m.shift.data[m.shift.data.abs() <= m.shift_mask.abs()] = 0.0
#m.shift_mask = m.shift.data.min().cuda()
# weight_copy = m.shift.data.abs().clone()
# mask = weight_copy.gt(0).float().cuda()
# m.shift.grad.data.mul_(mask)
# m.sign.grad.data.mul_(mask)
# m.shift.data.mul_(mask)
# m.sign.data.mul_(mask)
#if epoch == [0, args.epochs, 3]:
# m.shift_grad = m.shift.grad.data
# m.shift_mean_grad[m.shift_grad != m.shift_grad.mean().cuda()] = m.shift_grad.mean().cuda()
# if isinstance(m, Adder2D):
#m.mask = m.adder.data.min().cuda()
# adder_copy = m.adder.data.abs().clone()
# mask = adder_copy.gt(0).float().cuda()
# m.adder.grad.data.mul_(mask)
# m.adder.data.mul_(mask)
#if epoch == [0, args.epochs, 3]:
# m.adder_grad = m.adder.grad.data
# m.adder_mean_grad[m.adder_grad != m.adder_grad.mean().cuda()] = m.adder_grad.mean().cuda()
#torch.cuda.synchronize()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.1f}%)]\tLoss: {:.6f}'.format(epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item()))
Total_train_time = time.time() - start_time
history_score[epoch][0] = avg_loss / len(train_loader)
history_score[epoch][1] = np.round(train_acc / len(train_loader), 2)
history_score[epoch][3] = Total_train_time
print('total training time for one epoch: {}'.format(Total_train_time))
torch.cuda.synchronize()
def test():
model.eval()
test_loss = 0
test_acc = 0
test_acc_5 = 0
start_time = time.time()
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
else:
data, target = data.cpu(), target.cpu()
with torch.no_grad():
#data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss
prec1, prec5 = accuracy(output.data, target.data, topk=(1, 5))
test_acc += prec1.item()
test_acc_5 += prec5.item()
Total_test_time = time.time() - start_time
history_score[epoch][4] = Total_test_time
print('total test time for one epoch: {}'.format(Total_test_time))
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Prec1: {}/{} ({:.2f}%), Prec5: ({:.2f}%)\n'.format(
test_loss, test_acc, len(test_loader), test_acc / len(test_loader), test_acc_5 / len(test_loader)))
return np.round(test_acc / len(test_loader), 2), np.round(test_acc_5 / len(test_loader), 2), Total_test_time
best_prec1 = 0.
best_prec5 = 0.
best_prec1_p = 0.
best_prec1_p1 = 0.
percent = 0.1
percent_add = 0.05
Total_time =0
if __name__ == '__main__':
for epoch in range(args.start_epoch, args.epochs):
#for epoch in range(100):
if args.eval_only:
with torch.no_grad():
prec1, prec5, Total_test_time = test()
print('Prec1: {}; Prec5: {}'.format(prec1, prec5))
Total_time +=Total_test_time
best_prec1 = max(prec1, best_prec1)
Total_time= Total_time/(args.epochs-args.start_epoch)
print('Best accuracy',best_prec1)
print('Total_time',Total_time)
| 44.493653
| 198
| 0.651873
|
20407f8bd3c23fa5b4cf9c3b3fa4be0c7dce831b
| 35,276
|
py
|
Python
|
lib-python/2.4.1/test/regrtest.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
lib-python/2.4.1/test/regrtest.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
lib-python/2.4.1/test/regrtest.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
#! /usr/bin/env python
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-q: quiet -- don't print anything except if a test fails
-g: generate -- write the output file for a test instead of comparing it
-x: exclude -- arguments are tests to *exclude*
-s: single -- run only a single test (see below)
-r: random -- randomize test execution order
-f: fromfile -- read names of tests to run from a file (see below)
-l: findleaks -- if GC is available detect tests that leak memory
-u: use -- specify which special resource intensive tests to run
-h: help -- print this text and exit
-t: threshold -- call gc.set_threshold(N)
-T: coverage -- turn on code coverage using the trace module
-D: coverdir -- Directory where coverage files are put
-N: nocoverdir -- Put coverage files alongside modules
-L: runleaks -- run the leaks(1) command just before exit
-R: huntrleaks -- search for reference leaks (needs debug build, v. slow)
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
-T turns on code coverage tracing with the trace module.
-D specifies the directory where coverage files are put.
-N Put coverage files alongside modules.
-s means to run only a single test and exit. This is useful when
doing memory analysis on the Python interpreter (which tends to consume
too many resources to run the full regression test non-stop). The
file /tmp/pynexttest is read to find the next test to run. If this
file is missing, the first test_*.py file in testdir or on the command
line is used. (actually tempfile.gettempdir() is used instead of
/tmp).
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), so the minimal invocation is '-R ::'.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some test that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resource, e.g. testing SSL support for sockets.
bsddb - It is okay to run the bsddb testsuite, which takes
a long time to complete.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
compiler - Test the compiler package by compiling all the source
in the standard library and test suite. This takes
a long time.
subprocess Run all tests for the subprocess module.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
"""
import os
import sys
import getopt
import random
import warnings
import sre
import cStringIO
import traceback
# I see no other way to suppress these warnings;
# putting them in test_grammar.py has no effect:
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
".*test.test_grammar$")
if sys.maxint > 0x7fffffff:
# Also suppress them in <string>, because for 64-bit platforms,
# that's where test_grammar.py hides them.
warnings.filterwarnings("ignore", "hex/oct constants", FutureWarning,
"<string>")
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
from test import test_support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
'decimal', 'compiler', 'subprocess')
def usage(code, msg=''):
print __doc__
if msg: print msg
sys.exit(code)
def main(tests=None, testdir=None, verbose=0, quiet=False, generate=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, generate, exclude, single,
randomize, findleaks, use_resources, trace and coverdir) allow programmers
calling main() directly to set the values that would normally be set by
flags on the command line.
"""
test_support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvgqxsrf:lu:t:TD:NLR:',
['help', 'verbose', 'quiet', 'generate',
'exclude', 'single', 'random', 'fromfile',
'findleaks', 'use=', 'threshold=', 'trace',
'coverdir=', 'nocoverdir', 'runleaks',
'huntrleaks='
])
except getopt.error, msg:
usage(2, msg)
# Defaults
if use_resources is None:
use_resources = []
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
elif o in ('-g', '--generate'):
generate = True
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-s', '--single'):
single = True
elif o in ('-r', '--randomize'):
randomize = True
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
coverdir = os.path.join(os.getcwd(), a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) != 3:
print a, huntrleaks
usage(2, '-R takes three colon-separated arguments')
if len(huntrleaks[0]) == 0:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if len(huntrleaks[1]) == 0:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks[2]) == 0:
huntrleaks[2] = "reflog.txt"
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage(1, 'Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
if generate and verbose:
usage(2, "-g and -v don't go together!")
if single and fromfile:
usage(2, "-s and -f don't go together!")
good = []
bad = []
skipped = []
resource_denieds = []
if findleaks:
try:
import gc
except ImportError:
print 'No GC available, disabling findleaks.'
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
from tempfile import gettempdir
filename = os.path.join(gettempdir(), 'pynexttest')
try:
fp = open(filename, 'r')
next = fp.read().strip()
tests = [next]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(fromfile)
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
if args:
args = map(removepy, args)
if tests:
tests = map(removepy, tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS[:]
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests[:0] = args
args = []
tests = tests or args or findtests(testdir, stdtests, nottests)
if single:
tests = tests[:1]
if randomize:
random.shuffle(tests)
if trace:
import trace
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
trace=False, count=True)
test_support.verbose = verbose # Tell tests to be moderately quiet
test_support.use_resources = use_resources
save_modules = sys.modules.keys()
for test in tests:
if not quiet:
print test
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, generate, verbose, quiet, testdir)',
globals=globals(), locals=vars())
else:
ok = runtest(test, generate, verbose, quiet, testdir, huntrleaks)
if ok > 0:
good.append(test)
elif ok == 0:
bad.append(test)
else:
skipped.append(test)
if ok == -2:
resource_denieds.append(test)
if findleaks:
gc.collect()
if gc.garbage:
print "Warning: test created", len(gc.garbage),
print "uncollectable object(s)."
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
test_support.unload(module)
# The lists won't be sorted if running with -r
good.sort()
bad.sort()
skipped.sort()
if good and not quiet:
if not bad and not skipped and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if verbose:
print "CAUTION: stdout isn't compared in verbose mode:"
print "a test that passes in verbose mode may fail without it."
if bad:
print count(len(bad), "test"), "failed:"
printlist(bad)
if skipped and not quiet:
print count(len(skipped), "test"), "skipped:"
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print count(len(surprise), "skip"), \
"unexpected on", plat + ":"
printlist(surprise)
else:
print "Those skips are all expected on", plat + "."
else:
print "Ask someone to teach regrtest.py about which tests are"
print "expected to get skipped on", plat + "."
if single:
alltests = findtests(testdir, stdtests, nottests)
for i in range(len(alltests)):
if tests[0] == alltests[i]:
if i == len(alltests) - 1:
os.unlink(filename)
else:
fp = open(filename, 'w')
fp.write(alltests[i+1] + '\n')
fp.close()
break
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_operations',
'test_builtin',
'test_exceptions',
'test_types',
]
NOTTESTS = [
'test_support',
'test_future1',
'test_future2',
'test_future3',
]
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
if not testdir: testdir = findtestdir()
names = os.listdir(testdir)
tests = []
for name in names:
if name[:5] == "test_" and name[-3:] == os.extsep+"py":
modname = name[:-3]
if modname not in stdtests and modname not in nottests:
tests.append(modname)
tests.sort()
return stdtests + tests
def runtest(test, generate, verbose, quiet, testdir=None, huntrleaks=False):
"""Run a single test.
test -- the name of the test
generate -- if true, generate output, instead of running the test
and comparing it to a previously created output file
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
testdir -- test directory
"""
test_support.unload(test)
if not testdir:
testdir = findtestdir()
outputdir = os.path.join(testdir, "output")
outputfile = os.path.join(outputdir, test)
if verbose:
cfp = None
else:
cfp = cStringIO.StringIO()
if huntrleaks:
refrep = open(huntrleaks[2], "a")
try:
save_stdout = sys.stdout
try:
if cfp:
sys.stdout = cfp
print test # Output file starts with test name
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# Most tests run to completion simply as a side-effect of
# being imported. For the benefit of tests that can't run
# that way (like test_threaded_import), explicitly invoke
# their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
if huntrleaks:
# This code *is* hackish and inelegant, yes.
# But it seems to do the job.
import copy_reg
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
import gc
def cleanup():
import _strptime, urlparse, warnings, dircache
from distutils.dir_util import _path_created
_path_created.clear()
warnings.filters[:] = fs
gc.collect()
sre.purge()
_strptime._regex_cache.clear()
urlparse.clear_cache()
copy_reg.dispatch_table.clear()
copy_reg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
dircache.reset()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
reload(the_module)
deltas = []
repcount = huntrleaks[0] + huntrleaks[1]
print >> sys.stderr, "beginning", repcount, "repetitions"
print >> sys.stderr, \
("1234567890"*(repcount//10 + 1))[:repcount]
for i in range(repcount):
rc = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
cleanup()
deltas.append(sys.gettotalrefcount() - rc - 2)
print >>sys.stderr
if max(map(abs, deltas[-huntrleaks[1]:])) > 0:
print >>sys.stderr, test, 'leaked', \
deltas[-huntrleaks[1]:], 'references'
print >>refrep, test, 'leaked', \
deltas[-huntrleaks[1]:], 'references'
# The end of the huntrleaks hackishness.
finally:
sys.stdout = save_stdout
except test_support.ResourceDenied, msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -2
except (ImportError, test_support.TestSkipped), msg:
if not quiet:
print test, "skipped --", msg
sys.stdout.flush()
return -1
except KeyboardInterrupt:
raise
except test_support.TestFailed, msg:
print "test", test, "failed --", msg
sys.stdout.flush()
return 0
except:
type, value = sys.exc_info()[:2]
print "test", test, "crashed --", str(type) + ":", value
sys.stdout.flush()
if verbose:
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
return 0
else:
if not cfp:
return 1
output = cfp.getvalue()
if generate:
if output == test + "\n":
if os.path.exists(outputfile):
# Write it since it already exists (and the contents
# may have changed), but let the user know it isn't
# needed:
print "output file", outputfile, \
"is no longer needed; consider removing it"
else:
# We don't need it, so don't create it.
return 1
fp = open(outputfile, "w")
fp.write(output)
fp.close()
return 1
if os.path.exists(outputfile):
fp = open(outputfile, "r")
expected = fp.read()
fp.close()
else:
expected = test + "\n"
if output == expected or huntrleaks:
return 1
print "test", test, "produced unexpected output:"
sys.stdout.flush()
reportdiff(expected, output)
sys.stdout.flush()
return 0
def reportdiff(expected, output):
import difflib
print "*" * 70
a = expected.splitlines(1)
b = output.splitlines(1)
sm = difflib.SequenceMatcher(a=a, b=b)
tuples = sm.get_opcodes()
def pair(x0, x1):
# x0:x1 are 0-based slice indices; convert to 1-based line indices.
x0 += 1
if x0 >= x1:
return "line " + str(x0)
else:
return "lines %d-%d" % (x0, x1)
for op, a0, a1, b0, b1 in tuples:
if op == 'equal':
pass
elif op == 'delete':
print "***", pair(a0, a1), "of expected output missing:"
for line in a[a0:a1]:
print "-", line,
elif op == 'replace':
print "*** mismatch between", pair(a0, a1), "of expected", \
"output and", pair(b0, b1), "of actual output:"
for line in difflib.ndiff(a[a0:a1], b[b0:b1]):
print line,
elif op == 'insert':
print "***", pair(b0, b1), "of actual output doesn't appear", \
"in expected output after line", str(a1)+":"
for line in b[b0:b1]:
print "+", line,
else:
print "get_opcodes() returned bad tuple?!?!", (op, a0, a1, b0, b1)
print "*" * 70
def findtestdir():
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
return testdir
def removepy(name):
if name.endswith(os.extsep + "py"):
name = name[:-3]
return name
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
print fill(' '.join(map(str, x)), width,
initial_indent=blanks, subsequent_indent=blanks)
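# Illustration (made-up names): printlist(['test_a', 'test_b']) prints
# "    test_a test_b", wrapped to at most 70 columns with a 4-blank indent.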
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_normalization
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_normalization.skip_expected
# controls that.
# test_socket_ssl
# Controlled by test_socket_ssl.skip_expected. Requires the network
# resource, and a socket module with ssl support.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
# test_codecmaps_*
# Whether a skip is expected here depends on whether a large test
# input file has been downloaded. test_codecmaps_*.skip_expected
# controls that.
_expectations = {
'win32':
"""
test__locale
test_applesingle
test_al
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_mhlib
test_nis
test_openpty
test_ossaudiodev
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_threadsignals
test_timing
""",
'linux2':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_nis
test_ntpath
test_ossaudiodev
test_sunaudiodev
""",
'mac':
"""
test_al
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_bz2
test_cd
test_cl
test_commands
test_crypt
test_curses
test_dbm
test_dl
test_fcntl
test_fork1
test_gl
test_grp
test_ioctl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_ossaudiodev
test_poll
test_popen
test_popen2
test_posix
test_pty
test_pwd
test_resource
test_signal
test_sunaudiodev
test_sundry
test_tarfile
test_timing
""",
'unixware7':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'openunix8':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_sundry
""",
'sco_sv3':
"""
test_al
test_applesingle
test_asynchat
test_bsddb
test_bsddb185
test_cd
test_cl
test_dl
test_fork1
test_gettext
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_queue
test_sax
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
""",
'riscos':
"""
test_al
test_applesingle
test_asynchat
test_atexit
test_bsddb
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_crypt
test_dbm
test_dl
test_fcntl
test_fork1
test_gdbm
test_gl
test_grp
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mmap
test_nis
test_ntpath
test_openpty
test_poll
test_popen2
test_pty
test_pwd
test_strop
test_sunaudiodev
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
test_timing
""",
'darwin':
"""
test__locale
test_al
test_bsddb
test_bsddb3
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_ossaudiodev
test_poll
test_sunaudiodev
""",
'sunos5':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dbm
test_gdbm
test_gl
test_gzip
test_imgfile
test_linuxaudiodev
test_openpty
test_zipfile
test_zlib
""",
'hp-ux11':
"""
test_al
test_applesingle
test_bsddb
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_gzip
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_minidom
test_nis
test_ntpath
test_openpty
test_pyexpat
test_sax
test_sunaudiodev
test_zipfile
test_zlib
""",
'atheos':
"""
test_al
test_applesingle
test_bsddb185
test_cd
test_cl
test_curses
test_dl
test_gdbm
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_locale
test_mhlib
test_mmap
test_nis
test_poll
test_popen2
test_resource
test_sunaudiodev
""",
'cygwin':
"""
test_al
test_applesingle
test_bsddb185
test_bsddb3
test_cd
test_cl
test_curses
test_dbm
test_gl
test_imgfile
test_ioctl
test_largefile
test_linuxaudiodev
test_locale
test_nis
test_ossaudiodev
test_socketserver
test_sunaudiodev
""",
'os2emx':
"""
test_al
test_applesingle
test_audioop
test_bsddb185
test_bsddb3
test_cd
test_cl
test_commands
test_curses
test_dl
test_gl
test_imgfile
test_largefile
test_linuxaudiodev
test_mhlib
test_mmap
test_nis
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
test_sunaudiodev
""",
'freebsd4':
"""
test_aepack
test_al
test_applesingle
test_bsddb
test_bsddb3
test_cd
test_cl
test_gdbm
test_gl
test_imgfile
test_linuxaudiodev
test_locale
test_macfs
test_macostools
test_nis
test_normalization
test_ossaudiodev
test_pep277
test_plistlib
test_pty
test_scriptpackages
test_socket_ssl
test_socketserver
test_sunaudiodev
test_tcl
test_timeout
test_unicode_file
test_urllibnet
test_winreg
test_winsound
""",
}
_expectations['freebsd5'] = _expectations['freebsd4']
_expectations['freebsd6'] = _expectations['freebsd4']
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_normalization
from test import test_socket_ssl
from test import test_timeout
from test import test_codecmaps_cn, test_codecmaps_jp
from test import test_codecmaps_kr, test_codecmaps_tw
from test import test_codecmaps_hk
self.valid = False
if sys.platform in _expectations:
s = _expectations[sys.platform]
self.expected = set(s.split())
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
if test_normalization.skip_expected:
self.expected.add('test_normalization')
if test_socket_ssl.skip_expected:
self.expected.add('test_socket_ssl')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
for cc in ('cn', 'jp', 'kr', 'tw', 'hk'):
if eval('test_codecmaps_' + cc).skip_expected:
self.expected.add('test_codecmaps_' + cc)
if sys.maxint == 9223372036854775807L:
self.expected.add('test_rgbimg')
self.expected.add('test_imageop')
if not sys.platform in ("mac", "darwin"):
MAC_ONLY = ["test_macostools", "test_macfs", "test_aepack",
"test_plistlib", "test_scriptpackages"]
for skip in MAC_ONLY:
self.expected.add(skip)
if sys.platform != "win32":
WIN_ONLY = ["test_unicode_file", "test_winreg",
"test_winsound"]
for skip in WIN_ONLY:
self.expected.add(skip)
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. This
# prevents relative imports from working, and relative imports will screw
# up the testing framework. E.g. if both test.test_support and
# test_support are imported, they will not contain the same globals, and
# much of the testing framework relies on the globals in the
# test.test_support module.
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = pathlen = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
if len(sys.path) == pathlen:
print 'Could not find %r in sys.path to remove it' % mydir
main()
| 30.648132
| 79
| 0.56177
|
d66f02c27f8a678dcab009c8776f8c3cce8afe62
| 2,584
|
py
|
Python
|
2018/06/danger.py
|
stacybrock/advent-of-code
|
781e097bbd506faa4b05ebd844f5641127a6cd08
|
[
"MIT"
] | null | null | null |
2018/06/danger.py
|
stacybrock/advent-of-code
|
781e097bbd506faa4b05ebd844f5641127a6cd08
|
[
"MIT"
] | null | null | null |
2018/06/danger.py
|
stacybrock/advent-of-code
|
781e097bbd506faa4b05ebd844f5641127a6cd08
|
[
"MIT"
] | null | null | null |
# solution to Advent of Code 2018, day 6 part one and two
# https://adventofcode.com/2018/day/6
#
# assumes puzzle input is in a file called input.txt
import fileinput
from collections import defaultdict
from operator import itemgetter
def main():
raw_coordinates = list(line.strip() for line in fileinput.input())
# convert 'x, y' formatted coordinates into tuples of ints
coords = [tuple(map(int,c.split(", "))) for c in raw_coordinates]
# calculate outer boundary of grid
min_x = min(coords, key=itemgetter(0))[0]
max_x = max(coords, key=itemgetter(0))[0]
min_y = min(coords, key=itemgetter(1))[1]
max_y = max(coords, key=itemgetter(1))[1]
# create sets of coordinates that lie on the outer boundary
leftedge = set([c for c in coords if c[0] == min_x])
rightedge = set([c for c in coords if c[0] == max_x])
topedge = set([c for c in coords if c[1] == min_y])
bottomedge = set([c for c in coords if c[1] == max_y])
# use set math to determine the coordinates that lie
# within the outermost edges
interior_coords = set(coords) - leftedge - rightedge - topedge - bottomedge
# grid is a dict-of-dicts containing the closest coordinate
# to that point
# {0: {0: (x, y)}}
grid = defaultdict(dict)
for x in range(min_x, max_x+1):
for y in range(min_y, max_y+1):
distances = {}
for c in coords:
distances[c] = manhattan_distance((x, y), c)
(candidate, distance) = min(distances.items(), key=itemgetter(1))
if list(distances.values()).count(distance) == 1:
grid[x][y] = candidate
else:
grid[x][y] = None
# solve part one
coordinate_areas = list(
[sum([1 for x, y_val in grid.items() for c2 in y_val.values() if c2 == c]) for c in interior_coords]
)
print(f"Size of largest area: {max(coordinate_areas)}")
# solve part two
grid = defaultdict(dict)
for x in range(min_x, max_x+1):
for y in range(min_y, max_y+1):
total_distance = sum([manhattan_distance((x, y), c) for c in coords])
if total_distance < 10000:
grid[x][y] = 1
area = sum([sum(list(y_val.values())) for x, y_val in grid.items()])
print(f"Size of < 10000 region: {area}")
def manhattan_distance(a, b):
"""Calculate Manhattan distance between two coordinates
Inputs:
a - tuple for point 1
b - tuple for point 2
"""
return abs(a[0] - b[0]) + abs(a[1] - b[1])
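# Example with arbitrary points (not from the puzzle input):
# manhattan_distance((1, 2), (4, 6)) == abs(1 - 4) + abs(2 - 6) == 7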
if __name__ == '__main__':
main()
| 34.453333
| 108
| 0.614551
|
268d96d52e584d409a63a1d739e2a3208ae481ce
| 10,394
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/elb_application_lb_facts.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 1
|
2021-04-02T08:08:39.000Z
|
2021-04-02T08:08:39.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/elb_application_lb_facts.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/elb_application_lb_facts.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 1
|
2020-05-03T01:13:16.000Z
|
2020-05-03T01:13:16.000Z
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: elb_application_lb_facts
short_description: Gather facts about application ELBs in AWS
description:
- Gather facts about application ELBs in AWS
version_added: "2.4"
requirements: [ boto3 ]
author: Rob White (@wimnat)
options:
load_balancer_arns:
description:
- The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.
required: false
names:
description:
- The names of the load balancers.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all application ELBs
- elb_application_lb_facts:
# Gather facts about a particular application ELB given its ARN
- elb_application_lb_facts:
load_balancer_arns:
- "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff"
# Gather facts about application ELBs named 'elb1' and 'elb2'
- elb_application_lb_facts:
names:
- elb1
- elb2
'''
RETURN = '''
load_balancers:
description: a list of load balancers
returned: always
type: complex
contains:
access_logs_s3_bucket:
description: The name of the S3 bucket for the access logs.
returned: when status is present
type: string
sample: mys3bucket
access_logs_s3_enabled:
description: Indicates whether access logs stored in Amazon S3 are enabled.
returned: when status is present
type: string
sample: true
access_logs_s3_prefix:
description: The prefix for the location in the S3 bucket.
returned: when status is present
type: string
sample: /my/logs
availability_zones:
description: The Availability Zones for the load balancer.
returned: when status is present
type: list
sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]"
canonical_hosted_zone_id:
description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.
returned: when status is present
type: string
sample: ABCDEF12345678
created_time:
description: The date and time the load balancer was created.
returned: when status is present
type: string
sample: "2015-02-12T02:14:02+00:00"
deletion_protection_enabled:
description: Indicates whether deletion protection is enabled.
returned: when status is present
type: string
sample: true
dns_name:
description: The public DNS name of the load balancer.
returned: when status is present
type: string
sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com
idle_timeout_timeout_seconds:
description: The idle timeout value, in seconds.
returned: when status is present
type: string
sample: 60
ip_address_type:
description: The type of IP addresses used by the subnets for the load balancer.
returned: when status is present
type: string
sample: ipv4
load_balancer_arn:
description: The Amazon Resource Name (ARN) of the load balancer.
returned: when status is present
type: string
sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455
load_balancer_name:
description: The name of the load balancer.
returned: when status is present
type: string
sample: my-elb
scheme:
description: Internet-facing or internal load balancer.
returned: when status is present
type: string
sample: internal
security_groups:
description: The IDs of the security groups for the load balancer.
returned: when status is present
type: list
sample: ['sg-0011223344']
state:
description: The state of the load balancer.
returned: when status is present
type: dict
sample: "{'code': 'active'}"
tags:
description: The tags attached to the load balancer.
returned: when status is present
type: dict
sample: "{
'Tag': 'Example'
}"
type:
description: The type of load balancer.
returned: when status is present
type: string
sample: application
vpc_id:
description: The ID of the VPC for the load balancer.
returned: when status is present
type: string
sample: vpc-0011223344
'''
import traceback
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def get_elb_listeners(connection, module, elb_arn):
try:
return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_listener_rules(connection, module, listener_arn):
try:
return connection.describe_rules(ListenerArn=listener_arn)['Rules']
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def get_load_balancer_attributes(connection, module, load_balancer_arn):
try:
load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# Replace '.' with '_' in attribute key names to make it more Ansibley
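    # (for example, the ELBv2 attribute key 'access_logs.s3.enabled' becomes
    # 'access_logs_s3_enabled', matching the RETURN documentation above)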
for k, v in list(load_balancer_attributes.items()):
load_balancer_attributes[k.replace('.', '_')] = v
del load_balancer_attributes[k]
return load_balancer_attributes
def get_load_balancer_tags(connection, module, load_balancer_arn):
try:
return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])
except ClientError as e:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
def list_load_balancers(connection, module):
load_balancer_arns = module.params.get("load_balancer_arns")
names = module.params.get("names")
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
if not load_balancer_arns and not names:
load_balancers = load_balancer_paginator.paginate().build_full_result()
if load_balancer_arns:
load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()
if names:
load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()
except ClientError as e:
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
module.exit_json(load_balancers=[])
else:
module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except NoCredentialsError as e:
module.fail_json(msg="AWS authentication problem. " + e.message, exception=traceback.format_exc())
for load_balancer in load_balancers['LoadBalancers']:
# Get the attributes for each elb
load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))
# Get the listeners for each elb
load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])
# For each listener, get listener rules
for listener in load_balancer['listeners']:
listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])
    # Turn the boto3 result into ansible_friendly_snaked_names
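    # (camelCase keys such as 'LoadBalancerArn' come back as 'load_balancer_arn')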
snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]
# Get tags for each load balancer
for snaked_load_balancer in snaked_load_balancers:
snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])
module.exit_json(load_balancers=snaked_load_balancers)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
load_balancer_arns=dict(type='list'),
names=dict(type='list')
)
)
module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['load_balancer_arns', 'names']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_load_balancers(connection, module)
if __name__ == '__main__':
main()
| 37.121429
| 160
| 0.670483
|
2c1458742461dfa240e506483652c644420aea3b
| 1,000
|
py
|
Python
|
tests/test_utils.py
|
tranquilitybase-io/tb-aws-dac
|
7ea6a2d1dc4d237e23aa457980d6f81817c8ffee
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
tranquilitybase-io/tb-aws-dac
|
7ea6a2d1dc4d237e23aa457980d6f81817c8ffee
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
tranquilitybase-io/tb-aws-dac
|
7ea6a2d1dc4d237e23aa457980d6f81817c8ffee
|
[
"Apache-2.0"
] | 1
|
2021-06-23T20:35:39.000Z
|
2021-06-23T20:35:39.000Z
|
import unittest
from unittest import TestCase
from gcpdac.utils import labellize
class Utils_Test(TestCase):
def test_labellize(self):
# google label rules here - https://cloud.google.com/compute/docs/labeling-resources
self.assertEqual("abc", labellize("abc"))
self.assertEqual("ab-c", labellize("ab c"))
self.assertEqual("ab-c", labellize("ab&c"))
self.assertEqual("ab_c", labellize("ab_c"))
self.assertEqual("ab-c", labellize("ab-c"))
self.assertEqual("abc", labellize("ABC"))
self.assertEqual("a123", labellize("123"))
self.assertEqual("a-123", labellize("-123"))
self.assertEqual("a-abc", labellize("-abc"))
self.assertEqual("a_123", labellize("_123"))
self.assertEqual("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvw",
labellize("abcdefghijklimnopqrstuvwxyz-0123456789_abcdefghijklimnopqrstuvwxyz"))
if __name__ == '__main__':
unittest.main()
| 38.461538
| 105
| 0.67
|
3e0738a9f7170c7a85d9deb2f310f2117e21188d
| 33
|
py
|
Python
|
pyrec/implicit/__init__.py
|
redbubble/pyrec
|
8f1a7ed73b69352f960f33e643eb20be7e74830a
|
[
"MIT"
] | 1
|
2019-10-17T02:09:18.000Z
|
2019-10-17T02:09:18.000Z
|
pyrec/implicit/__init__.py
|
redbubble/pyrec
|
8f1a7ed73b69352f960f33e643eb20be7e74830a
|
[
"MIT"
] | null | null | null |
pyrec/implicit/__init__.py
|
redbubble/pyrec
|
8f1a7ed73b69352f960f33e643eb20be7e74830a
|
[
"MIT"
] | null | null | null |
from .als import load_recommender
| 33
| 33
| 0.878788
|
149d12cd1fd9cece180b04a3b036de54b7acdda2
| 11,926
|
py
|
Python
|
e2e_tests/tests/experiment/test_metrics.py
|
seanr15/determined
|
f0eba7d43cf55a67832cc6001127c0b1e7326a9e
|
[
"Apache-2.0"
] | null | null | null |
e2e_tests/tests/experiment/test_metrics.py
|
seanr15/determined
|
f0eba7d43cf55a67832cc6001127c0b1e7326a9e
|
[
"Apache-2.0"
] | null | null | null |
e2e_tests/tests/experiment/test_metrics.py
|
seanr15/determined
|
f0eba7d43cf55a67832cc6001127c0b1e7326a9e
|
[
"Apache-2.0"
] | null | null | null |
import json
import multiprocessing as mp
from typing import Set, Union
import pytest
import determined_common.api.authentication as auth
from determined_common import api
from tests import config as conf
from tests import experiment as exp
@pytest.mark.e2e_cpu # type: ignore
@pytest.mark.timeout(600) # type: ignore
def test_streaming_metrics_api() -> None:
auth.initialize_session(conf.make_master_url(), try_reauth=True)
pool = mp.pool.ThreadPool(processes=7)
experiment_id = exp.create_experiment(
conf.fixtures_path("mnist_pytorch/adaptive_short.yaml"),
conf.tutorials_path("mnist_pytorch"),
)
# To fully test the streaming APIs, the requests need to start running immediately after the
# experiment, and then stay open until the experiment is complete. To accomplish this with all
# of the API calls on a single experiment, we spawn them all in threads.
metric_names_thread = pool.apply_async(request_metric_names, (experiment_id,))
train_metric_batches_thread = pool.apply_async(request_train_metric_batches, (experiment_id,))
valid_metric_batches_thread = pool.apply_async(request_valid_metric_batches, (experiment_id,))
train_trials_snapshot_thread = pool.apply_async(request_train_trials_snapshot, (experiment_id,))
valid_trials_snapshot_thread = pool.apply_async(request_valid_trials_snapshot, (experiment_id,))
train_trials_sample_thread = pool.apply_async(request_train_trials_sample, (experiment_id,))
valid_trials_sample_thread = pool.apply_async(request_valid_trials_sample, (experiment_id,))
metric_names_results = metric_names_thread.get()
train_metric_batches_results = train_metric_batches_thread.get()
valid_metric_batches_results = valid_metric_batches_thread.get()
train_trials_snapshot_results = train_trials_snapshot_thread.get()
valid_trials_snapshot_results = valid_trials_snapshot_thread.get()
train_trials_sample_results = train_trials_sample_thread.get()
valid_trials_sample_results = valid_trials_sample_thread.get()
if metric_names_results is not None:
pytest.fail("metric-names: %s. Results: %s" % metric_names_results)
if train_metric_batches_results is not None:
pytest.fail("metric-batches (training): %s. Results: %s" % train_metric_batches_results)
if valid_metric_batches_results is not None:
pytest.fail("metric-batches (validation): %s. Results: %s" % valid_metric_batches_results)
if train_trials_snapshot_results is not None:
pytest.fail("trials-snapshot (training): %s. Results: %s" % train_trials_snapshot_results)
if valid_trials_snapshot_results is not None:
pytest.fail("trials-snapshot (validation): %s. Results: %s" % valid_trials_snapshot_results)
if train_trials_sample_results is not None:
pytest.fail("trials-sample (training): %s. Results: %s" % train_trials_sample_results)
if valid_trials_sample_results is not None:
pytest.fail("trials-sample (validation): %s. Results: %s" % valid_trials_sample_results)
def request_metric_names(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/metric-names".format(experiment_id),
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
# First let's verify an empty response was sent back before any real work was done
if results[0]["searcherMetric"] != "validation_loss":
return ("unexpected searcher metric in first response", results)
if results[0]["trainingMetrics"] != []:
return ("unexpected training metric in first response", results)
if results[0]["validationMetrics"] != []:
return ("unexpected validation metric in first response", results)
# Then we verify that all expected responses are eventually received exactly once
accumulated_training = set()
accumulated_validation = set()
for i in range(1, len(results)):
for training in results[i]["trainingMetrics"]:
if training in accumulated_training:
return ("training metric appeared twice", results)
accumulated_training.add(training)
for validation in results[i]["validationMetrics"]:
if validation in accumulated_validation:
return ("training metric appeared twice", results)
accumulated_validation.add(validation)
if accumulated_training != {"loss"}:
return ("unexpected set of training metrics", results)
if accumulated_validation != {"validation_loss", "accuracy"}:
return ("unexpected set of validation metrics", results)
return None
def request_train_metric_batches(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/batches".format(experiment_id),
params={"metric_name": "loss", "metric_type": "METRIC_TYPE_TRAINING"},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
# First let's verify an empty response was sent back before any real work was done
if results[0]["batches"] != []:
return ("unexpected batches in first response", results)
# Then we verify that all expected responses are eventually received exactly once
accumulated = set()
for i in range(1, len(results)):
for batch in results[i]["batches"]:
if batch in accumulated:
return ("batch appears twice", results)
accumulated.add(batch)
if accumulated != {100, 200, 300, 400}:
return ("unexpected set of batches", results)
return None
def request_valid_metric_batches(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/batches".format(experiment_id),
params={"metric_name": "accuracy", "metric_type": "METRIC_TYPE_VALIDATION"},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
# First let's verify an empty response was sent back before any real work was done
if results[0]["batches"] != []:
return ("unexpected batches in first response", results)
# Then we verify that all expected responses are eventually received exactly once
accumulated = set()
for i in range(1, len(results)):
for batch in results[i]["batches"]:
if batch in accumulated:
return ("batch appears twice", results)
accumulated.add(batch)
if accumulated != {200, 400}:
return ("unexpected set of batches", results)
return None
def validate_hparam_types(hparams: dict) -> Union[None, str]:
for hparam in ["dropout1", "dropout2", "learning_rate"]:
if type(hparams[hparam]) != float:
return "hparam %s of unexpected type" % hparam
for hparam in ["global_batch_size", "n_filters1", "n_filters2"]:
if type(hparams[hparam]) != int:
return "hparam %s of unexpected type" % hparam
return None
def request_train_trials_snapshot(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/trials-snapshot".format(experiment_id),
params={
"metric_name": "loss",
"metric_type": "METRIC_TYPE_TRAINING",
"batches_processed": 100,
},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
# First let's verify an empty response was sent back before any real work was done
if results[0]["trials"] != []:
return ("unexpected trials in first response", results)
# Then we verify that we receive the expected number of trials and the right types
trials = set()
for i in range(1, len(results)):
for trial in results[i]["trials"]:
trials.add(trial["trialId"])
            hparam_error = validate_hparam_types(trial["hparams"])
            if hparam_error is not None:
                return (hparam_error, results)
if type(trial["metric"]) != float:
return ("metric of unexpected type", results)
if len(trials) != 5:
return ("unexpected number of trials received", results)
return None
def request_valid_trials_snapshot(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/trials-snapshot".format(experiment_id),
params={
"metric_name": "accuracy",
"metric_type": "METRIC_TYPE_VALIDATION",
"batches_processed": 200,
},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
# First let's verify an empty response was sent back before any real work was done
if results[0]["trials"] != []:
return ("unexpected trials in first response", results)
# Then we verify that we receive the expected number of trials and the right types
trials = set()
for i in range(1, len(results)):
for trial in results[i]["trials"]:
trials.add(trial["trialId"])
hparam_error = validate_hparam_types(trial["hparams"])
if hparam_error is not None:
return (hparam_error, results)
if type(trial["metric"]) != float:
return ("metric of unexpected type", results)
if len(trials) != 5:
return ("unexpected number of trials received", results)
return None
def check_trials_sample_result(results: list) -> Union[None, tuple]:
# First let's verify an empty response was sent back before any real work was done
if (
results[0]["trials"] != []
or results[0]["promotedTrials"] != []
or results[0]["demotedTrials"] != []
):
return ("unexpected trials in first response", results)
# Then we verify that we receive the expected number of trials and the right types
trials: Set[int] = set()
datapoints = {}
for i in range(1, len(results)):
newTrials = set()
for trial in results[i]["promotedTrials"]:
            if trial in trials:
                return ("trial listed as promoted twice", results)
            trials.add(trial)
            newTrials.add(trial)
            datapoints[trial] = 0
for trial in results[i]["trials"]:
if trial["trialId"] in newTrials:
hparam_error = validate_hparam_types(trial["hparams"])
if hparam_error is not None:
return (hparam_error, results)
else:
if trial["hparams"] is not None:
return ("hparams repeated for trial", results)
for point in trial["data"]:
if point["batches"] > datapoints[trial["trialId"]]:
datapoints[trial["trialId"]] = point["batches"]
else:
return ("data received out of order: " + str(trial["trialId"]), results)
return None
def request_train_trials_sample(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/trials-sample".format(experiment_id),
params={
"metric_name": "loss",
"metric_type": "METRIC_TYPE_TRAINING",
},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
return check_trials_sample_result(results)
def request_valid_trials_sample(experiment_id): # type: ignore
response = api.get(
conf.make_master_url(),
"api/v1/experiments/{}/metrics-stream/trials-sample".format(experiment_id),
params={
"metric_name": "accuracy",
"metric_type": "METRIC_TYPE_VALIDATION",
},
)
results = [message["result"] for message in map(json.loads, response.text.splitlines())]
return check_trials_sample_result(results)
| 43.845588
| 100
| 0.670636
|
965bbc1f52c5fc0da021d85fdb3ffed45c7fd55b
| 606
|
py
|
Python
|
var/spack/repos/builtin/packages/py-cmocean/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-cmocean/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-cmocean/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCmocean(PythonPackage):
"""Colormaps for Oceanography."""
homepage = "https://matplotlib.org/cmocean/"
pypi = "cmocean/cmocean-2.0.tar.gz"
version('2.0', sha256='13eea3c8994d8e303e32a2db0b3e686f6edfb41cb21e7b0e663c2b17eea9b03a')
depends_on('py-setuptools', type='build')
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
| 33.666667
| 93
| 0.719472
|
a77048dbcd0e475a86560a19cc2b9d09a841d023
| 807
|
py
|
Python
|
chatrah/urls.py
|
Mukthar86/kushwanth
|
a9a18437d9728037606a141569eca5439c20c22f
|
[
"MIT"
] | 1
|
2022-02-09T06:00:01.000Z
|
2022-02-09T06:00:01.000Z
|
chatrah/urls.py
|
Mukthar86/kushwanth
|
a9a18437d9728037606a141569eca5439c20c22f
|
[
"MIT"
] | null | null | null |
chatrah/urls.py
|
Mukthar86/kushwanth
|
a9a18437d9728037606a141569eca5439c20c22f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include, re_path
from django.conf import settings
from django.conf.urls.static import static
admin.site.site_header = 'Chatrah'
admin.site.site_title = 'Chatrah'
admin.site.index_title = 'Chatrah Administration'
admin.empty_value_display = '**Empty**'
urlpatterns = [
path('admin/', admin.site.urls),
path('paper/', include('paper.urls')),
path('', include('users.urls')),
#path('', include('pwa.urls')),
path('grow/', include('oppurtunities.urls')),
path('resume/',include('resume.urls')),
path('dashboard/', include('dashboard.urls')),
path('academic/', include('academic.urls')),
path('summernote/', include('django_summernote.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.086957
| 65
| 0.703841
|
2cfd48132d875803ceb3bda75f35e00a5cd5c818
| 1,648
|
py
|
Python
|
cal_flops.py
|
devzhk/Implicit-Competitive-Regularization
|
71bda29f2db18d1d7ae9860e4a761ff61cbec756
|
[
"Apache-2.0"
] | 107
|
2019-10-15T15:55:20.000Z
|
2021-12-27T02:02:50.000Z
|
cal_flops.py
|
AzR919/Implicit-Competitive-Regularization
|
d69b66b1843cf72d94a1b0957aa30eb34e46af53
|
[
"Apache-2.0"
] | 11
|
2019-10-16T09:41:23.000Z
|
2020-12-16T08:44:38.000Z
|
cal_flops.py
|
AzR919/Implicit-Competitive-Regularization
|
d69b66b1843cf72d94a1b0957aa30eb34e46af53
|
[
"Apache-2.0"
] | 26
|
2019-10-15T03:38:41.000Z
|
2021-03-25T11:52:08.000Z
|
from train_utils import get_model
import torch
from ptflops import get_model_complexity_info
if __name__ == '__main__':
z_dim = 128
    model_name = 'Resnet'
model_config = {'image_size': 64,
'image_channel': 3,
'feature_num': 128,
'n_extra_layers': 0,
'batchnorm_d': True,
'batchnorm_g': True}
with torch.cuda.device(0):
D, G = get_model(model_name=model_name, z_dim=z_dim, configs=model_config)
macsD, paramsD = get_model_complexity_info(D,
(model_config['image_channel'],
model_config['image_size'],
model_config['image_size']),
as_strings=True,
print_per_layer_stat=True,
verbose=True)
print('{:<30} {:<8}'.format('Computational complexity: ', macsD))
print('{:<30} {:<8}'.format('Number of parameters: ', paramsD))
macsG, paramsG = get_model_complexity_info(G,
(z_dim,),
as_strings=True,
print_per_layer_stat=True,
verbose=True)
print('{:<30} {:<8}'.format('Computational complexity: ', macsG))
print('{:<30} {:<8}'.format('Number of parameters: ', paramsG))
| 51.5
| 82
| 0.432039
|
f646d5f9f321e722d5ab661f7a905134270d6c67
| 1,470
|
py
|
Python
|
helper-functions/get_satellite_data.py
|
scottlittle/solar-sensors
|
c9bd496e6cb589a73a7340522c48a530d3131672
|
[
"Apache-2.0"
] | 1
|
2015-08-19T02:29:41.000Z
|
2015-08-19T02:29:41.000Z
|
helper-functions/get_satellite_data.py
|
scottlittle/solar-sensors
|
c9bd496e6cb589a73a7340522c48a530d3131672
|
[
"Apache-2.0"
] | null | null | null |
helper-functions/get_satellite_data.py
|
scottlittle/solar-sensors
|
c9bd496e6cb589a73a7340522c48a530d3131672
|
[
"Apache-2.0"
] | null | null | null |
# the directory where this is run must have the list.txt file that
# generates the local http files (http_files) with the command:
# "wget -i list.txt" at the command line. This command should be
# run before running this script
import pandas as pd
import os
import glob
import requests
mypath = "data/satellite/sfarea/" #"data/satellite/colorado/summer6months/"
http_files = filter(os.path.isfile, glob.glob(mypath + "*")) #local http files
http_files.sort(key=lambda x: os.path.getmtime(x)) #sorts by time
http_files = [os.path.basename(i) for i in http_files] #extract filename
my_list_file = http_files[0] #pop out the list used to generate files in this dir
http_files = http_files[1:] #keep what is not popped out
with open(mypath + my_list_file) as f: #find noaa http files
begin_urls = f.read().splitlines() #beginning part of urls to make downloads
len_begin_urls = len(begin_urls) #used for displaying completion percentage
for i, begin_url in enumerate(begin_urls):
print "------------- ", 100.0 * i/len_begin_urls, " percent done overall---------------"
df = pd.read_html(mypath + http_files[i],header=0)[0] #read local http file into df
filenames = []
for j in range(len(df)): #generate filenames from dataframe for data
filenames.append(df.loc[j, 'Name'])
for filename in filenames:
req = requests.get(begin_url + '/' + filename)
with open( mypath + 'data/' + filename , 'wb' ) as fout: #save data!
fout.write(req.content)
| 37.692308
| 89
| 0.719728
|
bd6ef4d1d10b52ec1a5f708ba3723cad0813e3f0
| 332
|
py
|
Python
|
images/migrations/0004_auto_20201126_2252.py
|
cebanauskes/ida_images
|
708eb44274b28d53f9b0422fbf3711d85ac62a6b
|
[
"MIT"
] | null | null | null |
images/migrations/0004_auto_20201126_2252.py
|
cebanauskes/ida_images
|
708eb44274b28d53f9b0422fbf3711d85ac62a6b
|
[
"MIT"
] | null | null | null |
images/migrations/0004_auto_20201126_2252.py
|
cebanauskes/ida_images
|
708eb44274b28d53f9b0422fbf3711d85ac62a6b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-11-26 19:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0003_auto_20201126_2228'),
]
operations = [
migrations.RenameModel(
old_name='ImageModel',
new_name='Image',
),
]
| 18.444444
| 47
| 0.596386
|
b682027b06dff29eea2d25f2d0f376ab9f8d7176
| 7,873
|
py
|
Python
|
test/functional/rpc_users.py
|
petegee/CnBCoin
|
0566ad715ff36c0a983741752bd2796ea2633010
|
[
"MIT"
] | 1
|
2021-05-16T06:17:44.000Z
|
2021-05-16T06:17:44.000Z
|
test/functional/rpc_users.py
|
petegee/CnBCoin
|
0566ad715ff36c0a983741752bd2796ea2633010
|
[
"MIT"
] | null | null | null |
test/functional/rpc_users.py
|
petegee/CnBCoin
|
0566ad715ff36c0a983741752bd2796ea2633010
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_chain(self):
super().setup_chain()
        #Append rpcauth to cnbcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
rpcuser = "rpcuser=rpcuser💻"
rpcpassword = "rpcpassword=rpcpassword🔑"
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "cnbcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "cnbcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcuser+"\n")
f.write(rpcpassword+"\n")
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcauth tool
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Use new authpair to confirm both work
self.log.info('Correct...')
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rt's password
self.log.info('Wrong...')
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rt
self.log.info('Wrong...')
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for rt2
self.log.info('Correct...')
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for rt2
self.log.info('Wrong...')
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Correct for randomly generated user
self.log.info('Correct...')
authpairnew = self.user+":"+self.password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong password for randomly generated user
self.log.info('Wrong...')
authpairnew = self.user+":"+self.password+"Wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
# rpcuser and rpcpassword authpair
self.log.info('Correct...')
rpcuserauthpair = "rpcuser💻:rpcpassword🔑"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 200)
conn.close()
#Wrong login name with rpcuser's password
rpcuserauthpair = "rpcuserwrong:rpcpassword"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
#Wrong password for rpcuser
self.log.info('Wrong...')
rpcuserauthpair = "rpcuser:rpcpasswordwrong"
headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status, 401)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 38.033816
| 129
| 0.614505
|
7bad6ab9693c2e19c6a4520c75dd9601d6beee75
| 3,989
|
py
|
Python
|
lost_years/ssa.py
|
gojiplus/lost_years
|
b7af037827bbb478b8defb7db007bfc950d9ebfe
|
[
"MIT"
] | 5
|
2020-04-02T05:43:43.000Z
|
2020-04-02T18:26:13.000Z
|
lost_years/ssa.py
|
gojiplus/lost_years
|
b7af037827bbb478b8defb7db007bfc950d9ebfe
|
[
"MIT"
] | null | null | null |
lost_years/ssa.py
|
gojiplus/lost_years
|
b7af037827bbb478b8defb7db007bfc950d9ebfe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import pandas as pd
from pkg_resources import resource_filename
from .utils import column_exists, fixup_columns, closest
SSA_DATA = resource_filename(__name__, "data/ssa.csv")
SSA_COLS = ['age', 'male_life_expectancy', 'female_life_expectancy', 'year']
class LostYearsSSAData():
__df = None
@classmethod
def lost_years_ssa(cls, df, cols=None):
"""Appends Life expectancycolumn from SSA data to the input DataFrame
based on age, sex and year in the specific cols mapping
Args:
df (:obj:`DataFrame`): Pandas DataFrame containing the last name
column.
cols (dict or None): Column mapping for age, sex, and year
in DataFrame
(None for default mapping: {'age': 'age', 'sex': 'sex',
'year': 'year'})
Returns:
DataFrame: Pandas DataFrame with life expectency column(s):-
'ssa_age', 'ssa_year', 'ssa_life_expectancy'
"""
df_cols = {}
for col in ['age', 'sex', 'year']:
tcol = col if cols is None else cols[col]
if tcol not in df.columns:
print("No column `{0!s}` in the DataFrame".format(tcol))
return df
df_cols[col] = tcol
if cls.__df is None:
cls.__df = pd.read_csv(SSA_DATA, usecols=SSA_COLS)
out_df = pd.DataFrame()
for i, r in df.iterrows():
if r[df_cols['sex']].lower() in ['m', 'male']:
ecol = 'male_life_expectancy'
else:
ecol = 'female_life_expectancy'
sdf = cls.__df[['age', 'year', ecol]]
for c in ['age', 'year']:
sdf = sdf[sdf[c] == closest(sdf[c].unique(), r[df_cols[c]])]
odf = sdf[['age', 'year', ecol]].copy()
odf.columns = ['ssa_age', 'ssa_year', 'ssa_life_expectancy']
odf['index'] = i
out_df = pd.concat([out_df, odf])
out_df.set_index('index', drop=True, inplace=True)
rdf = df.join(out_df)
return rdf
lost_years_ssa = LostYearsSSAData.lost_years_ssa
def main(argv=sys.argv[1:]):
title = ('Appends Lost Years data column(s) by age, sex and year')
parser = argparse.ArgumentParser(description=title)
parser.add_argument('input', default=None,
help='Input file')
parser.add_argument('-a', '--age', default='age',
help='Columns name of age in the input file'
'(default=`age`)')
parser.add_argument('-s', '--sex', default='sex',
help='Columns name of sex in the input file'
'(default=`sex`)')
parser.add_argument('-y', '--year', default='year',
help='Columns name of year in the input file'
'(default=`year`)')
parser.add_argument('-o', '--output', default='lost-years-output.csv',
help='Output file with Lost Years data column(s)')
args = parser.parse_args(argv)
print(args)
df = pd.read_csv(args.input)
if not column_exists(df, args.age):
print("Column: `{0!s}` not found in the input file".format(args.age))
return -1
if not column_exists(df, args.sex):
print("Column: `{0!s}` not found in the input file".format(args.sex))
return -1
if not column_exists(df, args.year):
print("Column: `{0!s}` not found in the input file".format(args.year))
return -1
rdf = lost_years_ssa(df, cols={'age': args.age, 'sex': args.sex,
'year': args.year})
print("Saving output to file: `{0:s}`".format(args.output))
rdf.columns = fixup_columns(rdf.columns)
rdf.to_csv(args.output, index=False)
return 0
if __name__ == "__main__":
sys.exit(main())
| 34.991228
| 78
| 0.55628
|
536eceeb7fee1f8f93ced3a8638a937413d4cecb
| 1,162
|
py
|
Python
|
qiskit/_classicalregister.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | 1
|
2017-07-12T02:04:53.000Z
|
2017-07-12T02:04:53.000Z
|
qiskit/_classicalregister.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | null | null | null |
qiskit/_classicalregister.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | 6
|
2018-05-27T10:52:02.000Z
|
2021-04-02T19:20:11.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Classical register reference object.
Author: Andrew Cross
"""
from ._register import Register
class ClassicalRegister(Register):
"""Implement a classical register."""
def qasm(self):
"""Return OPENQASM string for this register."""
return "creg %s[%d];" % (self.name, self.size)
def __str__(self):
"""Return a string representing the register."""
return "ClassicalRegister(%s,%d)" % (self.name, self.size)
| 32.277778
| 79
| 0.658348
|
d67710f14549a3396ca9af109968823e049a849d
| 675
|
py
|
Python
|
ROAR/planning_module/mission_planner/mission_planner.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 18
|
2020-10-16T00:38:55.000Z
|
2022-03-03T06:01:49.000Z
|
ROAR/planning_module/mission_planner/mission_planner.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 20
|
2020-07-23T03:50:50.000Z
|
2021-11-09T04:00:26.000Z
|
ROAR/planning_module/mission_planner/mission_planner.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 140
|
2019-11-20T22:46:02.000Z
|
2022-03-29T13:26:17.000Z
|
from typing import List
import logging
from ROAR.utilities_module.data_structures_models import Transform
from collections import deque
from ROAR.planning_module.abstract_planner import AbstractPlanner
class MissionPlanner(AbstractPlanner):
def __init__(self, agent, **kwargs):
super().__init__(agent=agent,**kwargs)
self.logger = logging.getLogger(__name__)
self.mission_plan: deque = deque()
def run_in_series(self) -> List[Transform]:
"""
Abstract run step function for Mission Planner
Args:
vehicle: new vehicle state
Returns:
Plan for next steps
"""
return []
| 25.961538
| 66
| 0.677037
|
4b5562856173fe69e2883aed8b3d02680f362781
| 569
|
py
|
Python
|
mapBookmarkSite/mapbookmark/migrations/0005_auto_20210408_1559.py
|
UJHa/MapBookmarkSite
|
f80aa1576ccfab2fe53e993b36f1c798450af56b
|
[
"MIT"
] | null | null | null |
mapBookmarkSite/mapbookmark/migrations/0005_auto_20210408_1559.py
|
UJHa/MapBookmarkSite
|
f80aa1576ccfab2fe53e993b36f1c798450af56b
|
[
"MIT"
] | null | null | null |
mapBookmarkSite/mapbookmark/migrations/0005_auto_20210408_1559.py
|
UJHa/MapBookmarkSite
|
f80aa1576ccfab2fe53e993b36f1c798450af56b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-08 06:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapbookmark', '0004_auto_20210407_0820'),
]
operations = [
migrations.AlterField(
model_name='marker',
name='content',
field=models.CharField(default='', max_length=300),
),
migrations.AlterField(
model_name='marker',
name='title',
field=models.CharField(default='', max_length=100),
),
]
| 23.708333
| 63
| 0.578207
|
1ec0238c252fb78ade6d32b5ef08ead50069a5f4
| 263
|
py
|
Python
|
youtube_subdownloader/youtube.py
|
lavinske/Automation-scripts
|
3803a39c761971e630e0b1e659d1372c90be178a
|
[
"MIT"
] | null | null | null |
youtube_subdownloader/youtube.py
|
lavinske/Automation-scripts
|
3803a39c761971e630e0b1e659d1372c90be178a
|
[
"MIT"
] | null | null | null |
youtube_subdownloader/youtube.py
|
lavinske/Automation-scripts
|
3803a39c761971e630e0b1e659d1372c90be178a
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
# Launch Chrome and open YouTube; use `driver` instead of shadowing the built-in `open`.
driver = webdriver.Chrome()
driver.get('https://youtube.com')
# Type a query into the search box, then submit it by clicking the search icon.
searchmenu = driver.find_element_by_xpath('//*[@id="search"]')
searchmenu.send_keys('Pewdiepie')
go = driver.find_element_by_xpath('//*[@id="search-icon-legacy"]')
go.click()
| 23.909091
| 64
| 0.726236
|
84765592541142b2647a85631a7cc7abbcba88ba
| 254
|
py
|
Python
|
nodemcu_uploader/__init__.py
|
j-keck/nodemcu-uploader
|
557a25f37b1fb4e31a745719e237e42fff192834
|
[
"MIT"
] | null | null | null |
nodemcu_uploader/__init__.py
|
j-keck/nodemcu-uploader
|
557a25f37b1fb4e31a745719e237e42fff192834
|
[
"MIT"
] | null | null | null |
nodemcu_uploader/__init__.py
|
j-keck/nodemcu-uploader
|
557a25f37b1fb4e31a745719e237e42fff192834
|
[
"MIT"
] | 1
|
2020-03-17T04:13:22.000Z
|
2020-03-17T04:13:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Peter Magnusson <peter@birchroad.net>
"""Library and util for uploading files to NodeMCU version 0.9.4 and later"""
from .version import __version__
from .uploader import Uploader
| 28.222222
| 77
| 0.732283
|
b9d6b0ba55947aa4ad28b621451d92fe46e46846
| 401
|
py
|
Python
|
Awwardsclone/wsgi.py
|
MungaiKeren/Awwards-Clone
|
3aaff84dbe19e6163efec90436777d1bb687507c
|
[
"MIT"
] | null | null | null |
Awwardsclone/wsgi.py
|
MungaiKeren/Awwards-Clone
|
3aaff84dbe19e6163efec90436777d1bb687507c
|
[
"MIT"
] | 9
|
2020-06-05T23:59:38.000Z
|
2022-02-10T09:33:34.000Z
|
Awwardsclone/wsgi.py
|
MungaiKeren/Awwards-Clone
|
3aaff84dbe19e6163efec90436777d1bb687507c
|
[
"MIT"
] | 1
|
2020-05-26T03:18:39.000Z
|
2020-05-26T03:18:39.000Z
|
"""
WSGI config for Awwardsclone project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Awwardsclone.settings')
application = get_wsgi_application()
| 23.588235
| 78
| 0.790524
|
a142204391ce7d63a4a8b3a7e73976c5bf4f7770
| 2,034
|
py
|
Python
|
utils.py
|
dude123studios/Connect
|
cd4669d025720b394080d8f99eedce02c31f835b
|
[
"CC0-1.0"
] | null | null | null |
utils.py
|
dude123studios/Connect
|
cd4669d025720b394080d8f99eedce02c31f835b
|
[
"CC0-1.0"
] | null | null | null |
utils.py
|
dude123studios/Connect
|
cd4669d025720b394080d8f99eedce02c31f835b
|
[
"CC0-1.0"
] | 1
|
2021-03-16T19:42:22.000Z
|
2021-03-16T19:42:22.000Z
|
from passlib.hash import pbkdf2_sha256
from itsdangerous import URLSafeTimedSerializer
from flask import current_app
import uuid
from flask_uploads import extension
from extensions import image_set, cache
from PIL import Image
import os
def hash_password(password):
return pbkdf2_sha256.hash(password)
def check_password(password, hashed):
return pbkdf2_sha256.verify(password, hashed)
def generate_token(email, salt=None):
serializer = URLSafeTimedSerializer(current_app.config.get('SECRET_KEY'))
return serializer.dumps(email, salt=salt)
def save_image(image, folder):
filename = '{}.{}'.format(uuid.uuid4(), extension(image.filename))
image_set.save(image, folder=folder, name=filename)
filename = compress_image(filename=filename, folder=folder)
return filename
def verify_token(token, max_age=(30*60), salt=None):
serializer = URLSafeTimedSerializer(current_app.config.get('SECRET_KEY'))
try:
email = serializer.loads(token, max_age=max_age, salt=salt)
except:
return False
return email
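# A minimal sketch of how generate_token/verify_token above pair up, assuming a
# Flask application context so current_app.config is available; the salt value
# used here is a hypothetical placeholder.
def _example_token_roundtrip(email):
    token = generate_token(email, salt='activate')
    return verify_token(token, salt='activate') == email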
def compress_image(filename, folder):
filepath = image_set.path(filename=filename, folder=folder)
image = Image.open(filepath)
if image.mode != 'RGB':
image = image.convert('RGB')
if max(image.width, image.height) > 1600:
maxsize = (1600, 1600)
image.thumbnail(maxsize, Image.ANTIALIAS)
compressed_filename = '{}.jpg'.format(uuid.uuid4())
compressed_filepath = image_set.path(compressed_filename, folder=folder)
    image.save(compressed_filepath, optimize=True, quality=50)
os.remove(filepath)
return compressed_filename
def clear_cache(key_prefix):
keys = [key for key in cache.cache._cache.keys() if key.startswith(key_prefix)]
cache.delete_many(*keys)
class Mail(object):
    """Mail-sender stub: stores provider settings; sending is not implemented."""
    def __init__(self, domain, api_key):
        # Keep the mail provider configuration for later use.
        self.domain = domain
        self.api_key = api_key
    def send_email(self, to, subject, text, html, link):
        # Placeholder body so the class stays importable; no mail is actually sent.
        raise NotImplementedError('send_email is not implemented in this snippet')
| 33.344262
| 83
| 0.696657
|
5f9efb753be1a0571e6db375c0317779d5badb98
| 154
|
py
|
Python
|
client/admin.py
|
JohnChen97/authentication-lab
|
19f382c53ed04fbbe765d12094735aacd05c0ef7
|
[
"MIT"
] | null | null | null |
client/admin.py
|
JohnChen97/authentication-lab
|
19f382c53ed04fbbe765d12094735aacd05c0ef7
|
[
"MIT"
] | null | null | null |
client/admin.py
|
JohnChen97/authentication-lab
|
19f382c53ed04fbbe765d12094735aacd05c0ef7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from rest_framework.authtoken.admin import TokenAdmin
# Register your models here.
TokenAdmin.raw_id_fields = ('user', )
| 30.8
| 53
| 0.811688
|
db76c7ff157947f10d2fde13d3492380f8216499
| 56,230
|
py
|
Python
|
build/management/commands/build_g_proteins.py
|
jesperswillem/protwis
|
a36cd60fe9724d61b2c78c4d16f9d3697543b8aa
|
[
"Apache-2.0"
] | null | null | null |
build/management/commands/build_g_proteins.py
|
jesperswillem/protwis
|
a36cd60fe9724d61b2c78c4d16f9d3697543b8aa
|
[
"Apache-2.0"
] | null | null | null |
build/management/commands/build_g_proteins.py
|
jesperswillem/protwis
|
a36cd60fe9724d61b2c78c4d16f9d3697543b8aa
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from common.models import WebResource, WebLink
from protein.models import (Protein, ProteinGProtein,ProteinGProteinPair, ProteinConformation, ProteinState, ProteinFamily, ProteinAlias,
ProteinSequenceType, Species, Gene, ProteinSource, ProteinSegment)
from residue.models import (ResidueNumberingScheme, ResidueGenericNumber, Residue, ResidueGenericNumberEquivalent)
from signprot.models import SignprotStructure, SignprotBarcode, SignprotComplex
import pandas as pd
from optparse import make_option
from itertools import islice
from Bio import SeqIO
import Bio.PDB as PDB
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from collections import OrderedDict
import math
import numpy as np
import logging
import csv
import sys, os
import shlex, subprocess
import requests, xmltodict
import yaml
import xlrd
from urllib.request import urlopen
def read_excel(excelpath, sheet=None):
workbook = xlrd.open_workbook(excelpath)
worksheets = workbook.sheet_names()
data = {}
for worksheet_name in worksheets:
if sheet and worksheet_name != sheet:
continue
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
        curr_row = -1  # start before the first row; the loop pre-increments
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
if worksheet.cell_value(curr_row, 0) == '' and worksheet.cell_value(curr_row, 1) == 'SEM': #if empty reference
# If sem row, then add previous first cell, which is the protein name.
protein = worksheet.cell_value(curr_row-1, 0)
data_type = 'SEM'
curr_cell = 0 #skip first empty cell.
elif curr_row!=0 and worksheet.cell_value(curr_row, 0) == '': #if empty row
continue
elif curr_row == 0:
# First row -- fetch headers
headers = []
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
if cell_value:
headers.append(cell_value)
continue
else:
# MEAN row
protein = worksheet.cell_value(curr_row, 0)
data_type = 'mean'
if protein not in data:
data[protein] = {}
                curr_cell = 1  # skip the first two cells, which contain protein and type
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
gprotein = headers[curr_cell-2]
if gprotein not in data[protein]:
data[protein][gprotein] = {}
data[protein][gprotein][data_type] = cell_value
return data
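# Illustrative note on the structure read_excel() returns: a nested dict keyed by
# receptor name, then G protein header, holding the 'mean' and 'SEM' cell values.
# The names and numbers below are hypothetical placeholders, not sheet contents.
def _example_read_excel_output():
    return {'5HT1A': {'GNAI1': {'mean': -0.42, 'SEM': 0.05}}}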
class Command(BaseCommand):
help = 'Build G proteins'
# source file directory
gprotein_data_path = os.sep.join([settings.DATA_DIR, 'g_protein_data'])
if not os.path.exists(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'PDB_UNIPROT_ENSEMBLE_ALL.txt'])):
with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'PDB_UNIPROT_ENSEMBLE_ALL.txt']),'w') as f:
f.write('PDB_ID\tPDB_Chain\tPosition\tResidue\tCGN\tEnsembl_Protein_ID\tUniprot_ACC\tUniprot_ID\tsortColumn\n')
gprotein_data_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'PDB_UNIPROT_ENSEMBLE_ALL.txt'])
barcode_data_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'barcode_data.csv'])
pdbs_path = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'pdbs'])
lookup = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'CGN_lookup.csv'])
alignment_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'CGN_referenceAlignment.fasta'])
ortholog_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'gprotein_orthologs.csv'])
aska_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'LogRAi G-chimera submitted ver.xlsx'])
local_uniprot_dir = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot'])
local_uniprot_beta_dir = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot_beta'])
local_uniprot_gamma_dir = os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot_gamma'])
remote_uniprot_dir = 'http://www.uniprot.org/uniprot/'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('--filename', action='append', dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('--wt', default=False, type=str, help='Add wild type protein sequence to data')
parser.add_argument('--xtal', default=False, type=str, help='Add xtal to data')
parser.add_argument('--build_datafile', default=False, action='store_true', help='Build PDB_UNIPROT_ENSEMBLE_ALL file')
def handle(self, *args, **options):
self.options = options
# self.update_alignment()
# self.add_new_orthologs()
# return 0
# self.fetch_missing_uniprot_files()
# return 0
# files = os.listdir(self.local_uniprot_dir)
# for f in files:
# print(f)
# with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot', f]), 'r') as fi:
# for l in fi.readlines():
# if l.startswith('DE'):
# print(l)
# return 0
# with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'g_protein_segment_ends.yaml']), 'r') as yfile:
# dic = yaml.load(yfile)
# with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'test_alignment.fasta']), 'w') as testfile:
# for record in SeqIO.parse(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'g_proteins.fasta']), 'fasta'):
# sp, accession, name, ens = record.id.split('|')
# sequence = record.seq
# for d in dic:
# if name.lower() in dic[d]:
# offset = 0
# try:
# for seg in ['HN','S1','H1','HA','HB','HC','HD','HE','HF','S2','S3','H2','S4','H3','S5','HG','H4','S6','H5']:
# s, e = dic[d][name.lower()][seg][0], dic[d][name.lower()][seg][1]
# sequence = sequence[:s-1+offset]+'['+sequence[s-1+offset:e+offset]+']'+sequence[e+offset:]
# offset+=2
# break
# except:
# pass
# testfile.write(record.id+'\n'+str(sequence)+'\n')
# return 0
# dic = {}
# for record in SeqIO.parse(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'g_proteins.fasta']), 'fasta'):
# sp, accession, name, ens = record.id.split('|')
# if 'HUMAN' in name:
# subtype = name.split('_')[0].lower()
# dic[subtype] = {name.lower(): {}}
# else:
# dic[subtype][name.lower()] = {}
# # dic[subtype][name.lower()]['accession'] = accession
# # dic[subtype][name.lower()]['ensemble'] =
# try:
# p = Protein.objects.get(entry_name=name.lower())
# helices = ProteinSegment.objects.filter(proteinfamily='Alpha', category__in=['helix','sheet'])
# for h in helices:
# helix_resis = Residue.objects.filter(protein_conformation__protein=p, protein_segment=h)
# start, end = helix_resis[0], helix_resis.reverse()[0]
# dic[subtype][name.lower()][h.slug] = [start.sequence_number, end.sequence_number]
# except:
# print(name)
# with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'g_protein_segment_ends.yaml']), 'w') as f:
# yaml.dump(dic, f, indent=4)
# return 0
# with open(self.barcode_data_file,'r') as bdf:
# bdf_lines = bdf.readlines()
# new_lines = []
# for line in bdf_lines:
# line_split = line.split(',')
# if line_split[4] in ['G.hgh4.09','G.hgh4.10','G.H4.01','G.H4.02','G.H4.03','G.H4.04','G.H4.05','G.H4.06','G.H4.07','G.H4.08','G.H4.09','G.H4.10','G.H4.11','G.H4.12','G.H4.13',
# 'G.H4.14','G.H4.15','G.H4.16','G.H4.17','G.H4.18','G.H4.19','G.H4.20','G.H4.21','G.H4.22','G.H4.23','G.H4.24','G.H4.25','G.H4.26','G.H4.27']:
# line_split[1] = str(0)
# line_split[2] = str(0)
# line_split[5] = str(0)+'\n'
# if line_split[4] in ['G.hgh4.09','G.hgh4.10']:
# if line_split[3] not in ['GNAS2', 'GNAL']:
# if line_split[4]=='G.hgh4.09':
# line_split[4] = 'G.H4.01'
# if line_split[4]=='G.hgh4.10':
# line_split[4] = 'G.H4.02'
# elif line_split[4]=='G.H4.01':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.11'
# else:
# line_split[4] = 'G.H4.03'
# elif line_split[4]=='G.H4.02':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.12'
# else:
# line_split[4] = 'G.H4.04'
# elif line_split[4]=='G.H4.03':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.13'
# else:
# line_split[4] = 'G.H4.05'
# elif line_split[4]=='G.H4.04':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.14'
# else:
# line_split[4] = 'G.H4.06'
# elif line_split[4]=='G.H4.05':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.15'
# else:
# line_split[4] = 'G.H4.07'
# elif line_split[4]=='G.H4.06':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.16'
# else:
# line_split[4] = 'G.H4.08'
# elif line_split[4]=='G.H4.07':
# if line_split[3] in ['GNAS2', 'GNAL']:
# line_split[4] = 'G.hgh4.17'
# else:
# line_split[4] = 'G.H4.09'
# if line_split[3] in ['GNAS2', 'GNAL']:
# if line_split[4]=='G.H4.08':
# line_split[4] = 'G.hgh4.18'
# elif line_split[4]=='G.H4.09':
# line_split[4] = 'G.hgh4.19'
# elif line_split[4]=='G.H4.10':
# line_split[4] = 'G.hgh4.20'
# elif line_split[4]=='G.H4.11':
# line_split[4] = 'G.hgh4.21'
# elif line_split[4]=='G.H4.12':
# line_split[4] = 'G.H4.01'
# elif line_split[4]=='G.H4.13':
# line_split[4] = 'G.H4.02'
# elif line_split[4]=='G.H4.14':
# line_split[4] = 'G.H4.03'
# elif line_split[4]=='G.H4.15':
# line_split[4] = 'G.H4.04'
# elif line_split[4]=='G.H4.16':
# line_split[4] = 'G.H4.05'
# elif line_split[4]=='G.H4.17':
# line_split[4] = 'G.H4.06'
# elif line_split[4]=='G.H4.18':
# line_split[4] = 'G.H4.07'
# elif line_split[4]=='G.H4.19':
# line_split[4] = 'G.H4.08'
# elif line_split[4]=='G.H4.20':
# line_split[4] = 'G.H4.10'
# elif line_split[4]=='G.H4.21':
# line_split[4] = 'G.H4.11'
# elif line_split[4]=='G.H4.22':
# line_split[4] = 'G.H4.12'
# elif line_split[4]=='G.H4.23':
# line_split[4] = 'G.H4.13'
# elif line_split[4]=='G.H4.24':
# line_split[4] = 'G.H4.14'
# elif line_split[4]=='G.H4.25':
# line_split[4] = 'G.H4.15'
# elif line_split[4]=='G.H4.26':
# line_split[4] = 'G.H4.16'
# elif line_split[4]=='G.H4.27':
# line_split[4] = 'G.H4.17'
# else:
# if line_split[4]=='G.H4.20':
# line_split[4] = 'G.H4.10'
# elif line_split[4]=='G.H4.21':
# line_split[4] = 'G.H4.11'
# elif line_split[4]=='G.H4.22':
# line_split[4] = 'G.H4.12'
# elif line_split[4]=='G.H4.23':
# line_split[4] = 'G.H4.13'
# elif line_split[4]=='G.H4.24':
# line_split[4] = 'G.H4.14'
# elif line_split[4]=='G.H4.25':
# line_split[4] = 'G.H4.15'
# elif line_split[4]=='G.H4.26':
# line_split[4] = 'G.H4.16'
# elif line_split[4]=='G.H4.27':
# line_split[4] = 'G.H4.17'
# new_lines.append(line_split)
# with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'barcode_test.csv']), 'w') as f:
# out_lines = []
# for l in new_lines:
# out_line = ','.join(l)
# out_lines.append(out_line)
# out = ''.join(out_lines)
# f.write(out)
# return 0
if options['filename']:
filenames = options['filename']
else:
filenames = False
if self.options['wt']:
self.add_entry()
elif self.options['build_datafile']:
self.build_table_from_fasta()
else:
#add gproteins from cgn db
try:
self.purge_signprot_complex_data()
self.purge_coupling_data()
# self.purge_cgn_residues()
# self.purge_other_subunit_residues()
self.purge_cgn_proteins()
self.purge_other_subunit_proteins()
self.ortholog_mapping = OrderedDict()
with open(self.ortholog_file, 'r') as ortholog_file:
ortholog_data = csv.reader(ortholog_file, delimiter=',')
for i,row in enumerate(ortholog_data):
if i==0:
header = list(row)
continue
for j, column in enumerate(row):
if j in [0,1]:
continue
if '_' in column:
self.ortholog_mapping[column] = row[0]
else:
if column=='':
continue
self.ortholog_mapping[column+'_'+header[j]] = row[0]
self.create_g_proteins(filenames)
self.cgn_create_proteins_and_families()
human_and_orths = self.cgn_add_proteins()
self.update_protein_conformation(human_and_orths)
self.create_barcode()
self.add_other_subunits()
if os.path.exists(self.aska_file):
self.add_aska_coupling_data()
except Exception as msg:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
self.logger.error(msg)
def add_other_subunits(self):
beta_fam, created = ProteinFamily.objects.get_or_create(slug='100_002', name='Beta', parent=ProteinFamily.objects.get(name='G-Protein'))
gigsgt, created = ProteinFamily.objects.get_or_create(slug='100_002_001', name='G(I)/G(S)/G(T)', parent=beta_fam)
gamma_fam, created = ProteinFamily.objects.get_or_create(slug='100_003', name='Gamma', parent=ProteinFamily.objects.get(name='G-Protein'))
gigsgo, created = ProteinFamily.objects.get_or_create(slug='100_003_001', name='G(I)/G(S)/G(O)', parent=gamma_fam)
# create proteins
self.create_beta_gamma_proteins(self.local_uniprot_beta_dir, gigsgt)
self.create_beta_gamma_proteins(self.local_uniprot_gamma_dir, gigsgo)
# create residues
self.create_beta_gamma_residues(gigsgt)
self.create_beta_gamma_residues(gigsgo)
def create_beta_gamma_residues(self, proteinfamily):
bulk = []
fam = Protein.objects.filter(family=proteinfamily)
for prot in fam:
for i,j in enumerate(prot.sequence):
prot_conf = ProteinConformation.objects.get(protein=prot)
r = Residue(sequence_number=i+1, amino_acid=j, display_generic_number=None, generic_number=None, protein_conformation=prot_conf, protein_segment=None)
bulk.append(r)
if len(bulk) % 10000 == 0:
self.logger.info('Inserted bulk {} (Index:{})'.format(len(bulk),i))
# print(len(bulk),"inserts!",index)
Residue.objects.bulk_create(bulk)
# print('inserted!')
bulk = []
Residue.objects.bulk_create(bulk)
def create_beta_gamma_proteins(self, uniprot_dir, proteinfamily):
files = os.listdir(uniprot_dir)
for f in files:
acc = f.split('.')[0]
up = self.parse_uniprot_file(acc)
pst = ProteinSequenceType.objects.get(slug='wt')
species = Species.objects.get(common_name=up['species_common_name'])
source = ProteinSource.objects.get(name=up['source'])
try:
name = up['names'][0].split('Guanine nucleotide-binding protein ')[1]
except:
name = up['names'][0]
prot, created = Protein.objects.get_or_create(entry_name=up['entry_name'], accession=acc, name=name, sequence=up['sequence'], family=proteinfamily, parent=None,
residue_numbering_scheme=None, sequence_type=pst, source=source, species=species)
state = ProteinState.objects.get(slug='active')
prot_conf, created = ProteinConformation.objects.get_or_create(protein=prot, state=state, template_structure=None)
def fetch_missing_uniprot_files(self):
BASE = 'http://www.uniprot.org'
KB_ENDPOINT = '/uniprot/'
uniprot_files = os.listdir(self.local_uniprot_dir)
new_uniprot_files = os.listdir(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot']))
for record in SeqIO.parse(self.alignment_file, 'fasta'):
sp, accession, name, ens = record.id.split('|')
if accession+'.txt' not in uniprot_files and accession+'.txt' not in new_uniprot_files:
g_prot, species = name.split('_')
result = requests.get(BASE + KB_ENDPOINT + accession + '.txt')
with open(os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot', accession+'.txt']), 'w') as f:
f.write(result.text)
else:
try:
os.rename(os.sep.join([self.local_uniprot_dir, accession+'.txt']), os.sep.join([settings.DATA_DIR, 'g_protein_data', 'uniprot', accession+'.txt']))
except:
print('Missing: {}'.format(accession))
def update_alignment(self):
with open(self.lookup, 'r') as csvfile:
lookup_data = csv.reader(csvfile, delimiter=',', quotechar='"')
lookup_dict = OrderedDict([('-1','NA.N-terminal insertion.-1'),('-2','NA.N-terminal insertion.-2'),('-3','NA.N-terminal insertion.-3')])
for row in lookup_data:
lookup_dict[row[0]] = row[1:]
residue_data = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
for i, row in residue_data.iterrows():
try:
residue_data['CGN'][i] = lookup_dict[str(int(residue_data['sortColumn'][i]))][6].replace('(','').replace(')','')
except:
pass
residue_data['sortColumn'] = residue_data['sortColumn'].astype(int)
residue_data.to_csv(path_or_buf=self.gprotein_data_path+'/test.txt', sep='\t', na_rep='NA', index=False)
def add_new_orthologs(self):
residue_data = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
with open(self.lookup, 'r') as csvfile:
lookup_data = csv.reader(csvfile, delimiter=',', quotechar='"')
lookup_dict = OrderedDict([('-1','NA.N-terminal insertion.-1'),('-2','NA.N-terminal insertion.-2'),('-3','NA.N-terminal insertion.-3')])
for row in lookup_data:
lookup_dict[row[0]] = row[1:]
fasta_dict = OrderedDict()
for record in SeqIO.parse(self.alignment_file, 'fasta'):
sp, accession, name, ens = record.id.split('|')
g_prot, species = name.split('_')
if g_prot not in fasta_dict:
fasta_dict[g_prot] = OrderedDict([(name, [accession, ens, str(record.seq)])])
else:
fasta_dict[g_prot][name] = [accession, ens, str(record.seq)]
with open(self.ortholog_file, 'r') as ortholog_file:
ortholog_data = csv.reader(ortholog_file, delimiter=',')
for i,row in enumerate(ortholog_data):
if i==0:
header = list(row)
continue
for j, column in enumerate(row):
if j in [0,1]:
continue
if column!='':
if '_' in column:
entry_name = column
BASE = 'http://www.uniprot.org'
KB_ENDPOINT = '/uniprot/'
result = requests.get(BASE + KB_ENDPOINT, params={'query': 'mnemonic:{}'.format(entry_name), 'format':'list'})
accession = result.text.replace('\n','')
else:
entry_name = '{}_{}'.format(column,header[j])
accession = column
if entry_name not in fasta_dict[row[0]]:
result = requests.get('https://www.uniprot.org/uniprot/{}.xml'.format(accession))
uniprot_entry = result.text
try:
entry_dict = xmltodict.parse(uniprot_entry)
except:
self.logger.warning('Skipped: {}'.format(accession))
continue
try:
ensembl = [i for i in entry_dict['uniprot']['entry']['dbReference'] if i['@type']=='Ensembl'][0]['@id']
except:
self.logger.warning('Missing Ensembl: {}'.format(accession))
sequence = entry_dict['uniprot']['entry']['sequence']['#text'].replace('\n','')
seqc = SeqCompare()
aligned_seq = seqc.align(fasta_dict[row[0]][row[0]+'_HUMAN'][2],sequence)
fasta_dict[row[0]][entry_name] = [accession, ensembl, aligned_seq]
with open(os.sep.join([settings.DATA_DIR, 'g_protein_data','fasta_test.fa']),'w') as f:
for g,val in fasta_dict.items():
for i,j in val.items():
f.write('>sp|{}|{}|{}\n{}\n'.format(j[0],i,j[1],j[2]))
def add_entry(self):
if not self.options['wt']:
raise AssertionError('Error: Missing wt name')
residue_data = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
try:
if residue_data['Uniprot_ID'][self.options['wt']] and not self.options['xtal']:
return 0
except:
pass
with open(self.lookup, 'r') as csvfile:
lookup_data = csv.reader(csvfile, delimiter=',', quotechar='"')
lookup_dict = OrderedDict([('-1','NA.N-terminal insertion.-1'),('-2','NA.N-terminal insertion.-2'),('-3','NA.N-terminal insertion.-3')])
for row in lookup_data:
lookup_dict[row[0]] = row[1:]
sequence = ''
for record in SeqIO.parse(self.alignment_file, 'fasta'):
sp, accession, name, ens = record.id.split('|')
if name==self.options['wt'].upper():
sequence = record.seq
break
with open(self.gprotein_data_file, 'a') as f:
count_sort, count_res = 0,0
if self.options['xtal']:
                m = PDB.PDBParser().get_structure('st', os.sep.join([self.pdbs_path, self.options['xtal'] + '.pdb']))[0]
else:
for i in sequence:
count_sort+=1
if i=='-':
continue
count_res+=1
try:
cgn = lookup_dict[str(count_sort)][6].replace('(','').replace(')','')
except:
print('Dict error: {}'.format(self.options['wt']))
line = 'NA\tNA\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(count_res, i, cgn, ens, accession, name, count_sort)
f.write(line)
def build_table_from_fasta(self):
os.chdir('/vagrant/shared/sites/protwis')
for record in SeqIO.parse(self.alignment_file, 'fasta'):
sp, accession, name, ens = record.id.split('|')
if len(record.seq)!=455:
continue
command = "/env/bin/python3 manage.py build_g_proteins --wt "+str(name.lower())
subprocess.call(shlex.split(command))
def purge_coupling_data(self):
try:
ProteinGProteinPair.objects.filter().delete()
            ProteinGProtein.objects.all().delete()
ProteinAlias.objects.filter(protein__family__slug__startswith='100').delete()
except:
self.logger.warning('Existing data cannot be deleted')
def purge_cgn_residues(self):
try:
Residue.objects.filter(generic_number_id__scheme__slug="cgn").delete()
except:
self.logger.warning('Existing Residue data cannot be deleted')
def purge_other_subunit_residues(self):
try:
Residue.objects.filter(protein_conformation__protein__family__parent__name="Beta").delete()
except:
self.logger.warning('Existing Residue data cannot be deleted')
try:
Residue.objects.filter(protein_conformation__protein__family__parent__name="Gamma").delete()
except:
self.logger.warning('Existing Residue data cannot be deleted')
def purge_signprot_complex_data(self):
try:
SignprotComplex.objects.all().delete()
except:
self.logger.warning('SignprotComplex data cannot be deleted')
def create_barcode(self):
barcode_data = pd.read_csv(self.barcode_data_file, low_memory=False)
for index, entry in enumerate(barcode_data.iterrows()):
similarity = barcode_data[index:index+1]['aln_seqSim'].values[0]
identity = barcode_data[index:index+1]['aln_seqIdn'].values[0]
entry_name = barcode_data[index:index+1]['subfamily'].values[0].lower()+'_human'
CGN = barcode_data[index:index+1]['CGN'].values[0]
paralog = barcode_data[index:index+1]['paralog'].values[0]
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
continue
try:
cgn=Residue.objects.get(protein_conformation__protein=p, display_generic_number__label=CGN)
except:
# self.logger.warning('No residue number (GAP - position) for', CGN, "in ", p.name, "")
continue
if cgn:
try:
barcode, created = SignprotBarcode.objects.get_or_create(protein=p, residue=cgn, seq_similarity=similarity, seq_identity=identity, paralog_score=paralog)
if created:
self.logger.info('Created barcode for ' + CGN + ' for protein ' + p.name)
except IntegrityError:
self.logger.error('Failed creating barcode for ' + CGN + ' for protein ' + p.name)
def create_g_proteins(self, filenames=False):
self.logger.info('CREATING GPROTEINS')
translation = {'Gs family':'100_001_001', 'Gi/Go family':'100_001_002', 'Gq/G11 family':'100_001_003','G12/G13 family':'100_001_004',}
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.gprotein_data_path) if fn.endswith('Gprotein_crossclass.csv')]
source = "GuideToPharma"
for filename in filenames:
filepath = os.sep.join([self.gprotein_data_path, filename])
self.logger.info('Reading filename' + filename)
with open(filepath, 'r') as f:
reader = csv.reader(f)
for row in islice(reader, 1, None): # skip first line
entry_name = row[0]
primary = row[8]
secondary = row[9]
# fetch protein
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
print("protein not found for ", entry_name)
continue
primary = primary.replace("G protein (identity unknown)","None") #replace none
primary = primary.split(", ")
secondary = secondary.replace("G protein (identity unknown)","None") #replace none
secondary = secondary.split(", ")
if primary=='None' and secondary=='None':
print('no data for ', entry_name)
continue
# print(primary,secondary)
try:
for gp in primary:
if gp in ['','None','_-arrestin','Arrestin','G protein independent mechanism']: #skip bad ones
continue
g = ProteinGProtein.objects.get_or_create(name=gp, slug=translation[gp])[0]
# print(p, g)
gpair = ProteinGProteinPair(protein=p, g_protein=g, transduction='primary', source = source)
gpair.save()
except:
print("error in primary assignment", p, gp)
try:
for gp in secondary:
if gp in ['None','_-arrestin','Arrestin','G protein independent mechanism', '']: #skip bad ones
continue
                            if gp in primary:  # skip those that were already primary
continue
g = ProteinGProtein.objects.get_or_create(name=gp, slug=translation[gp])[0]
gpair = ProteinGProteinPair(protein=p, g_protein=g, transduction='secondary', source = source)
gpair.save()
except:
print("error in secondary assignment", p, gp)
self.logger.info('COMPLETED CREATING G PROTEINS')
def add_aska_coupling_data(self):
self.logger.info('CREATING ASKA COUPLING')
translation = {'Gs family':'100_001_001', 'Gi/Go family':'100_001_002', 'Gq/G11 family':'100_001_003','G12/G13 family':'100_001_004',}
# read source files
filepath = self.aska_file
sheet = "LogRAi mean and SEM"
self.logger.info('Reading filename' + filepath)
data = read_excel(filepath,sheet)
source = 'Aska'
lookup = {}
for entry_name, couplings in data.items():
# if it has / then pick first, since it gets same protein
entry_name = entry_name.split("/")[0]
# append _human to entry name
# entry_name = "{}_HUMAN".format(entry_name).lower()
# Fetch protein
try:
p = Protein.objects.filter(genes__name=entry_name, species__common_name="Human")[0]
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
print("protein not found for ", entry_name)
continue
for gprotein, values in couplings.items():
if gprotein not in lookup:
gp = Protein.objects.filter(genes__name=gprotein, species__common_name="Human")[0]
lookup[gprotein] = gp
else:
gp = lookup[gprotein]
                # Assume the ProteinGProtein family records already exist in the database.
if gp.family.slug not in lookup:
g = ProteinGProtein.objects.get(slug=gp.family.slug)
lookup[gp.family.slug] = g
else:
g = lookup[gp.family.slug]
gpair = ProteinGProteinPair(protein=p, g_protein=g, source = source, log_rai_mean=values['mean'], log_rai_sem=values['SEM'], g_protein_subunit = gp)
gpair.save()
        self.logger.info('COMPLETED ADDING ASKA COUPLING DATA')
def purge_cgn_proteins(self):
try:
Protein.objects.filter(residue_numbering_scheme__slug='cgn').delete()
except:
self.logger.info('Protein to delete not found')
def purge_other_subunit_proteins(self):
try:
Protein.objects.filter(residue_numbering_scheme=None).delete()
except:
self.logger.info('Protein to delete not found')
def add_cgn_residues(self, gprotein_list):
#Parsing pdb uniprot file for residues
self.logger.info('Start parsing PDB_UNIPROT_ENSEMBLE_ALL')
self.logger.info('Parsing file ' + self.gprotein_data_file)
residue_data = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
residue_data = residue_data.loc[residue_data['Uniprot_ACC'].isin(gprotein_list)]
cgn_scheme = ResidueNumberingScheme.objects.get(slug='cgn')
# Temp files to speed things up
temp = {}
temp['proteins'] = {}
temp['rgn'] = {}
temp['segment'] = {}
temp['equivalent'] = {}
bulk = []
self.logger.info('Insert residues: {} rows'.format(len(residue_data)))
for index, row in residue_data.iterrows():
if row['Uniprot_ACC'] in temp['proteins']:
pr = temp['proteins'][row['Uniprot_ACC']][0]
pc = temp['proteins'][row['Uniprot_ACC']][1]
else:
#fetch protein for protein conformation
pr, c= Protein.objects.get_or_create(accession=row['Uniprot_ACC'])
#fetch protein conformation
pc, c= ProteinConformation.objects.get_or_create(protein_id=pr)
temp['proteins'][row['Uniprot_ACC']] = [pr,pc]
#fetch residue generic number
rgnsp=[]
if(int(row['CGN'].split('.')[2])<10):
rgnsp = row['CGN'].split('.')
rgn_new = rgnsp[0]+'.'+rgnsp[1]+'.0'+rgnsp[2]
if rgn_new in temp['rgn']:
rgn = temp['rgn'][rgn_new]
else:
rgn, c= ResidueGenericNumber.objects.get_or_create(label=rgn_new)
temp['rgn'][rgn_new] = rgn
else:
if row['CGN'] in temp['rgn']:
rgn = temp['rgn'][row['CGN']]
else:
rgn, c= ResidueGenericNumber.objects.get_or_create(label=row['CGN'])
temp['rgn'][row['CGN']] = rgn
#fetch protein segment id
if row['CGN'].split(".")[1] in temp['segment']:
ps = temp['segment'][row['CGN'].split(".")[1]]
else:
ps, c= ProteinSegment.objects.get_or_create(slug=row['CGN'].split(".")[1], proteinfamily='Alpha')
temp['segment'][row['CGN'].split(".")[1]] = ps
try:
bulk_r = Residue(sequence_number=row['Position'], protein_conformation=pc, amino_acid=row['Residue'], generic_number=rgn, display_generic_number=rgn, protein_segment=ps)
# self.logger.info("Residues added to db")
bulk.append(bulk_r)
except:
self.logger.error("Failed to add residues")
if len(bulk) % 10000 == 0:
self.logger.info('Inserted bulk {} (Index:{})'.format(len(bulk),index))
# print(len(bulk),"inserts!",index)
Residue.objects.bulk_create(bulk)
# print('inserted!')
bulk = []
# Add also to the ResidueGenericNumberEquivalent table needed for single residue selection
try:
if rgn.label not in temp['equivalent']:
ResidueGenericNumberEquivalent.objects.get_or_create(label=rgn.label,default_generic_number=rgn, scheme=cgn_scheme)
temp['equivalent'][rgn.label] = 1
# self.logger.info("Residues added to ResidueGenericNumberEquivalent")
except:
self.logger.error("Failed to add residues to ResidueGenericNumberEquivalent")
self.logger.info('Inserted bulk {} (Index:{})'.format(len(bulk),index))
Residue.objects.bulk_create(bulk)
def update_protein_conformation(self, gprotein_list):
#gprotein_list=['gnaz_human','gnat3_human', 'gnat2_human', 'gnat1_human', 'gnas2_human', 'gnaq_human', 'gnao_human', 'gnal_human', 'gnai3_human', 'gnai2_human','gnai1_human', 'gna15_human', 'gna14_human', 'gna12_human', 'gna11_human', 'gna13_human']
state = ProteinState.objects.get(slug='active')
#add new cgn protein conformations
for g in gprotein_list:
gp = Protein.objects.get(accession=g)
try:
pc, created= ProteinConformation.objects.get_or_create(protein=gp, state=state, template_structure=None)
self.logger.info('Created protein conformation')
except:
self.logger.error('Failed to create protein conformation')
self.update_genericresiduenumber_and_proteinsegments(gprotein_list)
def update_genericresiduenumber_and_proteinsegments(self, gprotein_list):
#Parsing pdb uniprot file for generic residue numbers
self.logger.info('Start parsing PDB_UNIPROT_ENSEMBLE_ALL')
self.logger.info('Parsing file ' + self.gprotein_data_file)
residue_data = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
residue_data = residue_data[residue_data.Uniprot_ID.notnull()]
#residue_data = residue_data[residue_data['Uniprot_ID'].str.contains('_HUMAN')]
residue_data = residue_data[residue_data['Uniprot_ACC'].isin(gprotein_list)]
#filtering for human gproteins using list above
residue_generic_numbers= residue_data['CGN']
#Residue numbering scheme is the same for all added residue generic numbers (CGN)
cgn_scheme = ResidueNumberingScheme.objects.get(slug='cgn')
#purge line
#ResidueGenericNumber.objects.filter(scheme_id=12).delete()
for rgn in residue_generic_numbers.unique():
ps, c= ProteinSegment.objects.get_or_create(slug=rgn.split('.')[1], proteinfamily='Alpha')
rgnsp=[]
if(int(rgn.split('.')[2])<10):
rgnsp = rgn.split('.')
rgn_new = rgnsp[0]+'.'+rgnsp[1]+'.0'+rgnsp[2]
else:
rgn_new = rgn
try:
res_gen_num, created= ResidueGenericNumber.objects.get_or_create(label=rgn_new, scheme=cgn_scheme, protein_segment=ps)
self.logger.info('Created generic residue number')
except:
self.logger.error('Failed creating generic residue number')
self.add_cgn_residues(gprotein_list)
def cgn_add_proteins(self):
self.logger.info('Start parsing PDB_UNIPROT_ENSEMBLE_ALL')
self.logger.info('Parsing file ' + self.gprotein_data_file)
#parsing file for accessions
df = pd.read_table(self.gprotein_data_file, sep="\t", low_memory=False)
prot_type = 'purge'
pfm = ProteinFamily()
#Human proteins from CGN with families as keys: http://www.mrc-lmb.cam.ac.uk/CGN/about.html
cgn_dict = {}
cgn_dict['G-Protein']=['Gs', 'Gi/o', 'Gq/11', 'G12/13']
cgn_dict['100_001_001']=['GNAS2_HUMAN', 'GNAL_HUMAN']
cgn_dict['100_001_002']=['GNAI2_HUMAN', 'GNAI1_HUMAN', 'GNAI3_HUMAN', 'GNAT2_HUMAN', 'GNAT1_HUMAN', 'GNAT3_HUMAN', 'GNAZ_HUMAN', 'GNAO_HUMAN' ]
cgn_dict['100_001_003']=['GNAQ_HUMAN', 'GNA11_HUMAN', 'GNA14_HUMAN', 'GNA15_HUMAN']
cgn_dict['100_001_004']=['GNA12_HUMAN', 'GNA13_HUMAN']
#list of all 16 proteins
cgn_proteins_list=[]
for k in cgn_dict.keys():
for p in cgn_dict[k]:
if p.endswith('_HUMAN'):
cgn_proteins_list.append(p)
#print(cgn_proteins_list)
#GNA13_HUMAN missing from cambridge file
accessions= df.loc[df['Uniprot_ID'].isin(cgn_proteins_list)]
accessions= accessions['Uniprot_ACC'].unique()
#Create new residue numbering scheme
self.create_cgn_rns()
#purging one cgn entry
#ResidueNumberingScheme.objects.filter(name='cgn').delete()
rns = ResidueNumberingScheme.objects.get(slug='cgn')
for a in accessions:
up = self.parse_uniprot_file(a)
#Fetch Protein Family for gproteins
for k in cgn_dict.keys():
name=str(up['entry_name']).upper()
if name in cgn_dict[k]:
pfm = ProteinFamily.objects.get(slug=k)
#Create new Protein
self.cgn_creat_gproteins(pfm, rns, a, up)
###################ORTHOLOGS###############
orthologs_pairs =[]
orthologs =[]
#Orthologs for human gproteins
allprots = list(df.Uniprot_ID.unique())
allprots = list(set(allprots) - set(cgn_proteins_list))
# for gp in cgn_proteins_list:
for p in allprots:
# if str(p).startswith(gp.split('_')[0]):
if str(p) in self.ortholog_mapping:
# orthologs_pairs.append((str(p), self.ortholog_mapping[str(p)]+'_HUMAN'))
orthologs.append(str(p))
accessions_orth= df.loc[df['Uniprot_ID'].isin(orthologs)]
accessions_orth= accessions_orth['Uniprot_ACC'].unique()
for a in accessions_orth:
up = self.parse_uniprot_file(a)
#Fetch Protein Family for gproteins
for k in cgn_dict.keys():
name=str(up['entry_name']).upper()
name = name.split('_')[0]+'_'+'HUMAN'
if name in cgn_dict[k]:
pfm = ProteinFamily.objects.get(slug=k)
else:
try:
if self.ortholog_mapping[str(up['entry_name']).upper()]+'_HUMAN' in cgn_dict[k]:
pfm = ProteinFamily.objects.get(slug=k)
except:
pass
#Create new Protein
self.cgn_creat_gproteins(pfm, rns, a, up)
#human gproteins
orthologs_lower = [x.lower() for x in orthologs]
#print(orthologs_lower)
#orthologs to human gproteins
cgn_proteins_list_lower = [x.lower() for x in cgn_proteins_list]
#all gproteins
gprotein_list = cgn_proteins_list_lower + orthologs_lower
accessions_all = list(accessions_orth) + list(accessions)
return list(accessions_all)
def cgn_creat_gproteins(self, family, residue_numbering_scheme, accession, uniprot):
# get/create protein source
try:
source, created = ProteinSource.objects.get_or_create(name=uniprot['source'],
defaults={'name': uniprot['source']})
if created:
self.logger.info('Created protein source ' + source.name)
except IntegrityError:
source = ProteinSource.objects.get(name=uniprot['source'])
# get/create species
try:
species, created = Species.objects.get_or_create(latin_name=uniprot['species_latin_name'],
defaults={
'common_name': uniprot['species_common_name'],
})
if created:
self.logger.info('Created species ' + species.latin_name)
except IntegrityError:
species = Species.objects.get(latin_name=uniprot['species_latin_name'])
# get/create protein sequence type
# Wild-type for all sequences from source file
try:
sequence_type, created = ProteinSequenceType.objects.get_or_create(slug='wt',
defaults={
'slug': 'wt',
'name': 'Wild-type',
})
if created:
self.logger.info('Created protein sequence type Wild-type')
except:
self.logger.error('Failed creating protein sequence type Wild-type')
# create protein
p = Protein()
p.family = family
p.species = species
p.source = source
p.residue_numbering_scheme = residue_numbering_scheme
p.sequence_type = sequence_type
if accession:
p.accession = accession
p.entry_name = uniprot['entry_name'].lower()
try:
p.name = uniprot['names'][0].split('Guanine nucleotide-binding protein ')[1]
except:
p.name = uniprot['names'][0]
p.sequence = uniprot['sequence']
try:
p.save()
self.logger.info('Created protein {}'.format(p.entry_name))
except Exception as msg:
self.logger.error('Failed creating protein {} ({})'.format(p.entry_name,msg))
# protein aliases
for i, alias in enumerate(uniprot['names']):
pcgn = Protein.objects.get(entry_name=uniprot['entry_name'].lower())
a = ProteinAlias()
a.protein = pcgn
a.name = alias
a.position = i
try:
a.save()
self.logger.info('Created protein alias ' + a.name + ' for protein ' + p.name)
except:
self.logger.error('Failed creating protein alias ' + a.name + ' for protein ' + p.name)
# genes
for i, gene in enumerate(uniprot['genes']):
g = False
try:
g, created = Gene.objects.get_or_create(name=gene, species=species, position=i)
if created:
self.logger.info('Created gene ' + g.name + ' for protein ' + p.name)
except IntegrityError:
g = Gene.objects.get(name=gene, species=species, position=i)
if g:
pcgn = Protein.objects.get(entry_name=uniprot['entry_name'].lower())
g.proteins.add(pcgn)
# structures
for i, structure in enumerate(uniprot['structures']):
try:
res = structure[1]
if res == '-':
res = 0
structure, created = SignprotStructure.objects.get_or_create(PDB_code=structure[0], resolution=res)
if created:
self.logger.info('Created structure ' + structure.PDB_code + ' for protein ' + p.name)
except IntegrityError:
self.logger.error('Failed creating structure ' + structure.PDB_code + ' for protein ' + p.name)
if g:
pcgn = Protein.objects.get(entry_name=uniprot['entry_name'].lower())
structure.origin.add(pcgn)
structure.save()
def cgn_parent_protein_family(self):
pf_cgn, created_pf = ProteinFamily.objects.get_or_create(slug='100', defaults={
'name': 'G-Protein'}, parent=ProteinFamily.objects.get(slug='000'))
pff_cgn = ProteinFamily.objects.get(slug='100', name='G-Protein')
#Changed name "No Ligands" to "Gprotein"
pf1_cgn = ProteinFamily.objects.get_or_create(slug='100_001', name='Alpha', parent=pff_cgn)
def create_cgn_rns(self):
rns_cgn, created= ResidueNumberingScheme.objects.get_or_create(slug='cgn', short_name='CGN', defaults={
'name': 'Common G-alpha numbering scheme'})
def cgn_create_proteins_and_families(self):
#Creating single entries in "protein_family' table
ProteinFamily.objects.filter(slug__startswith="100").delete()
self.cgn_parent_protein_family()
#Human proteins from CGN: http://www.mrc-lmb.cam.ac.uk/CGN/about.html
cgn_dict = {}
levels = ['2', '3']
keys = ['Alpha', 'Gs', 'Gi/o', 'Gq/11', 'G12/13', '001']
slug1='100'
slug3= ''
i=1
cgn_dict['Alpha']=['001']
cgn_dict['001']=['Gs', 'Gi/o', 'Gq/11', 'G12/13']
#Protein families to be added
#Key of dictionary is level in hierarchy
cgn_dict['1']=['Alpha']
cgn_dict['2']=['001']
cgn_dict['3']=['Gs', 'Gi/o', 'Gq/11', 'G12/13']
#Protein lines not to be added to Protein families
cgn_dict['4']=['GNAS2', 'GNAL', 'GNAI1', 'GNAI2', 'GNAI3', 'GNAT2', 'GNAT1', 'GNAT3', 'GNAO', 'GNAZ', 'GNAQ', 'GNA11', 'GNA14', 'GNA15', 'GNA12', 'GNA13']
for entry in cgn_dict['001']:
name = entry
slug2= '_001'
slug3= '_00' + str(i)
slug = slug1 + slug2 + slug3
slug3 = ''
i = i+1
pff_cgn = ProteinFamily.objects.get(slug='100_001')
new_pf, created = ProteinFamily.objects.get_or_create(slug=slug, name=entry, parent=pff_cgn)
#function to create necessary arguments to add protein entry
self.cgn_add_proteins()
def parse_uniprot_file(self, accession):
filename = accession + '.txt'
local_file_path = os.sep.join([self.local_uniprot_dir, filename])
remote_file_path = self.remote_uniprot_dir + filename
up = {}
up['genes'] = []
up['names'] = []
up['structures'] = []
read_sequence = False
remote = False
# record whether organism has been read
os_read = False
# should local file be written?
local_file = False
try:
if os.path.isfile(local_file_path):
uf = open(local_file_path, 'r')
self.logger.info('Reading local file ' + local_file_path)
else:
uf = urlopen(remote_file_path)
remote = True
self.logger.info('Reading remote file ' + remote_file_path)
local_file = open(local_file_path, 'w')
for raw_line in uf:
# line format
if remote:
line = raw_line.decode('UTF-8')
else:
line = raw_line
# write to local file if appropriate
if local_file:
local_file.write(line)
# end of file
if line.startswith('//'):
break
# entry name and review status
if line.startswith('ID'):
split_id_line = line.split()
up['entry_name'] = split_id_line[1].lower()
review_status = split_id_line[2].strip(';')
if review_status == 'Unreviewed':
up['source'] = 'TREMBL'
elif review_status == 'Reviewed':
up['source'] = 'SWISSPROT'
# species
elif line.startswith('OS') and not os_read:
species_full = line[2:].strip().strip('.')
species_split = species_full.split('(')
up['species_latin_name'] = species_split[0].strip()
if len(species_split) > 1:
up['species_common_name'] = species_split[1].strip().strip(')')
else:
up['species_common_name'] = up['species_latin_name']
os_read = True
# names
elif line.startswith('DE'):
split_de_line = line.split('=')
if len(split_de_line) > 1:
split_segment = split_de_line[1].split('{')
up['names'].append(split_segment[0].strip().strip(';'))
# genes
elif line.startswith('GN'):
split_gn_line = line.split(';')
for segment in split_gn_line:
if '=' in segment:
split_segment = segment.split('=')
split_segment = split_segment[1].split(',')
for gene_name in split_segment:
split_gene_name = gene_name.split('{')
up['genes'].append(split_gene_name[0].strip())
# structures
elif line.startswith('DR') and 'PDB' in line and not 'sum' in line:
split_gn_line = line.split(';')
up['structures'].append([split_gn_line[1].lstrip(),split_gn_line[3].lstrip().split(" A")[0]])
# sequence
elif line.startswith('SQ'):
split_sq_line = line.split()
seq_len = int(split_sq_line[2])
read_sequence = True
up['sequence'] = ''
elif read_sequence == True:
up['sequence'] += line.strip().replace(' ', '')
# close the Uniprot file
uf.close()
except:
return False
# close the local file if appropriate
if local_file:
local_file.close()
return up
class SeqCompare(object):
def __init__(self):
pass
def align(self, seq1, seq2):
for p in pairwise2.align.globalms(seq1, seq2, 8, 5, -5, -5):
return format_alignment(*p).split('\n')[2]
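# A hedged sketch of how SeqCompare.align is used above: the ortholog sequence is
# globally aligned against the already-gapped human reference, and the gapped
# version of the second sequence is returned. The toy sequences are hypothetical.
def _example_seqcompare():
    return SeqCompare().align('MG-AGAS', 'MGAGAS')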
| 44.485759
| 257
| 0.53708
|
80109f7c8f0889af2c93cb81eaaed583566f120d
| 2,415
|
py
|
Python
|
devpotato_bot/error_handler.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2021-05-15T23:41:29.000Z
|
2021-05-15T23:41:29.000Z
|
devpotato_bot/error_handler.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2022-02-19T20:38:33.000Z
|
2022-02-19T23:53:39.000Z
|
devpotato_bot/error_handler.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2021-05-15T23:42:21.000Z
|
2021-05-15T23:42:21.000Z
|
import logging
import sys
import traceback
import telegram
from telegram import Update, ParseMode, Chat, User
from telegram.ext import CallbackContext
def create_callback(developer_ids):
logger = logging.getLogger(__name__)
def callback(update: Update, context: CallbackContext):
"""Log Errors caused by Updates."""
error_str = str(context.error)
if not error_str:
error_str = repr(context.error)
message_parts = [f'An error <code>{error_str}</code> was triggered']
if update:
user: User = update.effective_user
if user:
message_parts.append(f' by user {user.mention_html()}')
chat: Chat = update.effective_chat
if chat:
if chat.type == 'private':
message_parts.append(' in private chat')
else:
message_parts.append(f' in {chat.type} <i>{chat.title}</i>')
if update.effective_chat.username:
message_parts.append(f' (@{chat.username})')
message_parts.append(f' (id: {chat.id})')
if update.poll:
message_parts.append(f' with poll id {update.poll.id}')
trace = ''.join(traceback.format_tb(sys.exc_info()[2]))
if trace:
message_parts.append(f'. Full traceback:\n\n<code>{trace}</code>')
message_text = ''.join(message_parts)
delivery_failed = set()
for dev_id in developer_ids:
try:
context.bot.send_message(dev_id, message_text, parse_mode=ParseMode.HTML)
except (telegram.error.Unauthorized, telegram.error.BadRequest):
# User blocked the bot or didn't initiate conversation with it
delivery_failed.add(dev_id)
logger.warning('Update "%s" triggered an error', update, exc_info=context.error)
if delivery_failed:
failed_ids_str = ' '.join(str(i) for i in delivery_failed)
text = f'DM error reports delivery failed for users: {failed_ids_str}'
for dev_id in (developer_ids - delivery_failed):
try:
context.bot.send_message(dev_id, text)
except (telegram.error.Unauthorized, telegram.error.BadRequest):
pass # just ignore it
logger.warning(text)
return callback
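# A hedged wiring sketch for the factory above, assuming python-telegram-bot v13:
# the returned callback is registered on the dispatcher. The developer chat id is
# a hypothetical placeholder.
def _example_setup(updater):
    developer_ids = {111111111}  # hypothetical Telegram user id(s)
    updater.dispatcher.add_error_handler(create_callback(developer_ids))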
| 39.590164
| 89
| 0.595445
|
b1ebc17e9c05a9affc49ef3b1c2730745b493ffe
| 12,068
|
py
|
Python
|
core/domain/rights_domain.py
|
Rijuta-s/oppia
|
f4f3cd71f90285abee3b0f74062586aaafadce7d
|
[
"Apache-2.0"
] | null | null | null |
core/domain/rights_domain.py
|
Rijuta-s/oppia
|
f4f3cd71f90285abee3b0f74062586aaafadce7d
|
[
"Apache-2.0"
] | 3
|
2021-02-13T08:35:34.000Z
|
2021-05-18T12:17:06.000Z
|
core/domain/rights_domain.py
|
Rijuta-s/oppia
|
f4f3cd71f90285abee3b0f74062586aaafadce7d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for rights for various user actions."""
from __future__ import annotations
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.domain import change_domain
from core.domain import user_services
# IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve
# backward-compatibility with previous exploration snapshots in the datastore.
# Do not modify the definitions of CMD keys that already exist.
CMD_CREATE_NEW = feconf.CMD_CREATE_NEW
CMD_CHANGE_ROLE = feconf.CMD_CHANGE_ROLE
CMD_REMOVE_ROLE = feconf.CMD_REMOVE_ROLE
CMD_CHANGE_EXPLORATION_STATUS = feconf.CMD_CHANGE_EXPLORATION_STATUS
CMD_CHANGE_COLLECTION_STATUS = feconf.CMD_CHANGE_COLLECTION_STATUS
CMD_CHANGE_PRIVATE_VIEWABILITY = feconf.CMD_CHANGE_PRIVATE_VIEWABILITY
CMD_RELEASE_OWNERSHIP = feconf.CMD_RELEASE_OWNERSHIP
CMD_UPDATE_FIRST_PUBLISHED_MSEC = feconf.CMD_UPDATE_FIRST_PUBLISHED_MSEC
ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE
ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC
ROLE_OWNER = feconf.ROLE_OWNER
ROLE_EDITOR = feconf.ROLE_EDITOR
ROLE_VOICE_ARTIST = feconf.ROLE_VOICE_ARTIST
ROLE_VIEWER = feconf.ROLE_VIEWER
ROLE_NONE = feconf.ROLE_NONE
ASSIGN_ROLE_COMMIT_MESSAGE_TEMPLATE = 'Changed role of %s from %s to %s'
ASSIGN_ROLE_COMMIT_MESSAGE_REGEX = '^Changed role of (.*) from (.*) to (.*)$'
DEASSIGN_ROLE_COMMIT_MESSAGE_TEMPLATE = 'Remove %s from role %s'
DEASSIGN_ROLE_COMMIT_MESSAGE_REGEX = '^Remove (.*) from role (.*)$'
class ActivityRights:
"""Domain object for the rights/publication status of an activity (an
exploration or a collection).
"""
def __init__(
self, exploration_id, owner_ids, editor_ids, voice_artist_ids,
viewer_ids, community_owned=False, cloned_from=None,
status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False,
first_published_msec=None):
self.id = exploration_id
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.voice_artist_ids = voice_artist_ids
self.viewer_ids = viewer_ids
self.community_owned = community_owned
self.cloned_from = cloned_from
self.status = status
self.viewable_if_private = viewable_if_private
self.first_published_msec = first_published_msec
def validate(self):
"""Validates an ActivityRights object.
Raises:
utils.ValidationError. If any of the owners, editors, voice artists
and viewers lists overlap, or if a community-owned exploration
has owners, editors, voice artists or viewers specified.
"""
if self.community_owned:
if (self.owner_ids or self.editor_ids or self.voice_artist_ids or
self.viewer_ids):
raise utils.ValidationError(
'Community-owned explorations should have no owners, '
'editors, voice artists or viewers specified.')
if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE:
raise utils.ValidationError(
'Community-owned explorations cannot be private.')
if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids:
raise utils.ValidationError(
'Public explorations should have no viewers specified.')
owner_editor = set(self.owner_ids) & set(self.editor_ids)
owner_voice_artist = set(self.owner_ids) & set(self.voice_artist_ids)
owner_viewer = set(self.owner_ids) & set(self.viewer_ids)
editor_voice_artist = set(self.editor_ids) & set(self.voice_artist_ids)
editor_viewer = set(self.editor_ids) & set(self.viewer_ids)
voice_artist_viewer = set(self.voice_artist_ids) & set(self.viewer_ids)
if owner_editor:
raise utils.ValidationError(
'A user cannot be both an owner and an editor: %s' %
owner_editor)
if owner_voice_artist:
raise utils.ValidationError(
'A user cannot be both an owner and a voice artist: %s' %
owner_voice_artist)
if owner_viewer:
raise utils.ValidationError(
'A user cannot be both an owner and a viewer: %s' %
owner_viewer)
if editor_voice_artist:
raise utils.ValidationError(
'A user cannot be both an editor and a voice artist: %s' %
editor_voice_artist)
if editor_viewer:
raise utils.ValidationError(
'A user cannot be both an editor and a viewer: %s' %
editor_viewer)
if voice_artist_viewer:
raise utils.ValidationError(
'A user cannot be both a voice artist and a viewer: %s' %
voice_artist_viewer)
        if not self.community_owned and len(self.owner_ids) == 0:
            raise utils.ValidationError(
                'Activity should have at least one owner.')
def to_dict(self):
"""Returns a dict suitable for use by the frontend.
Returns:
dict. A dict version of ActivityRights suitable for use by the
frontend.
"""
if self.community_owned:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': True,
'owner_names': [],
'editor_names': [],
'voice_artist_names': [],
'viewer_names': [],
'viewable_if_private': self.viewable_if_private,
}
else:
return {
'cloned_from': self.cloned_from,
'status': self.status,
'community_owned': False,
'owner_names': user_services.get_human_readable_user_ids(
self.owner_ids),
'editor_names': user_services.get_human_readable_user_ids(
self.editor_ids),
'voice_artist_names': user_services.get_human_readable_user_ids(
self.voice_artist_ids),
'viewer_names': user_services.get_human_readable_user_ids(
self.viewer_ids),
'viewable_if_private': self.viewable_if_private,
}
def is_owner(self, user_id):
"""Checks whether given user is owner of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity owner.
"""
return bool(user_id in self.owner_ids)
def is_editor(self, user_id):
"""Checks whether given user is editor of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity editor.
"""
return bool(user_id in self.editor_ids)
def is_voice_artist(self, user_id):
"""Checks whether given user is voice artist of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity voice artist.
"""
return bool(user_id in self.voice_artist_ids)
def is_viewer(self, user_id):
"""Checks whether given user is viewer of activity.
Args:
user_id: str or None. Id of the user.
Returns:
bool. Whether user is an activity viewer.
"""
return bool(user_id in self.viewer_ids)
def is_published(self):
"""Checks whether activity is published.
Returns:
bool. Whether activity is published.
"""
return bool(self.status == ACTIVITY_STATUS_PUBLIC)
def is_private(self):
"""Checks whether activity is private.
Returns:
bool. Whether activity is private.
"""
return bool(self.status == ACTIVITY_STATUS_PRIVATE)
def is_solely_owned_by_user(self, user_id):
"""Checks whether the activity is solely owned by the user.
Args:
user_id: str. The id of the user.
Returns:
bool. Whether the activity is solely owned by the user.
"""
return user_id in self.owner_ids and len(self.owner_ids) == 1
def assign_new_role(self, user_id, new_role):
"""Assigns new role to user and removes previous role if present.
Args:
user_id: str. The ID of the user.
new_role: str. The role of the user.
Returns:
str. The previous role of the user.
"""
old_role = ROLE_NONE
if new_role == ROLE_VIEWER:
if self.status != ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Public explorations can be viewed by anyone.')
for role, user_ids in python_utils.ZIP(
[ROLE_OWNER, ROLE_EDITOR, ROLE_VIEWER, ROLE_VOICE_ARTIST],
[self.owner_ids, self.editor_ids, self.viewer_ids,
self.voice_artist_ids]):
if user_id in user_ids:
user_ids.remove(user_id)
old_role = role
if new_role == role and old_role != new_role:
user_ids.append(user_id)
if old_role == new_role:
if old_role == ROLE_OWNER:
raise Exception(
'This user already owns this exploration.')
elif old_role == ROLE_EDITOR:
raise Exception(
'This user already can edit this exploration.')
elif old_role == ROLE_VOICE_ARTIST:
raise Exception(
'This user already can voiceover this exploration.')
elif old_role == ROLE_VIEWER:
raise Exception(
'This user already can view this exploration.')
return old_role
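# Illustrative usage sketch (not part of the original module): construct an
# ActivityRights object, validate it, and grant a brand-new user the editor
# role via assign_new_role(). The exploration and user ids are placeholders.
def _example_activity_rights_usage():
    rights = ActivityRights(
        'exp_id_0',
        owner_ids=['owner_user_id'],
        editor_ids=[],
        voice_artist_ids=[],
        viewer_ids=[])
    rights.validate()
    # The user held no role before, so the returned previous role is ROLE_NONE.
    previous_role = rights.assign_new_role('editor_user_id', ROLE_EDITOR)
    assert previous_role == ROLE_NONE
    assert rights.is_editor('editor_user_id')
    return rights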
class ExplorationRightsChange(change_domain.BaseChange):
"""Domain object class for an exploration rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_exploration_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = feconf.EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS
class CollectionRightsChange(change_domain.BaseChange):
"""Domain object class for an collection rights change.
The allowed commands, together with the attributes:
- 'create_new'
- 'change_role' (with assignee_id, old_role, new_role)
- 'change_collection_status' (with old_status, new_status)
- 'change_private_viewability' (with
old_viewable_if_private, new_viewable_if_private)
- 'release_ownership'
- 'update_first_published_msec' (with
old_first_published_msec, new_first_published_msec)
A role must be one of the ALLOWED_ROLES.
A status must be one of the ALLOWED_STATUS.
"""
ALLOWED_COMMANDS = feconf.COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS
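# Illustrative sketch (not part of the original module): per the docstrings
# above, a 'change_role' command carries assignee_id, old_role and new_role.
# Assuming the usual change-dict shape keyed by 'cmd' (the assignee id below
# is a placeholder), such a change would look roughly like this:
def _example_change_role_dict():
    return {
        'cmd': 'change_role',
        'assignee_id': 'editor_user_id',
        'old_role': ROLE_NONE,
        'new_role': ROLE_EDITOR,
    }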
| 38.069401
| 80
| 0.643934
|
6a46a6f2e5e6e7481ef08753ec23e8e320d79092
| 53
|
py
|
Python
|
src/models/__init__.py
|
jkpawlowski96/TRD-client-api
|
d85c182981e155e94b185c8f1367841143fd33e0
|
[
"Unlicense"
] | null | null | null |
src/models/__init__.py
|
jkpawlowski96/TRD-client-api
|
d85c182981e155e94b185c8f1367841143fd33e0
|
[
"Unlicense"
] | null | null | null |
src/models/__init__.py
|
jkpawlowski96/TRD-client-api
|
d85c182981e155e94b185c8f1367841143fd33e0
|
[
"Unlicense"
] | null | null | null |
from .pair import Pair
from .interval import Interval
| 26.5
| 30
| 0.830189
|
645e4b45b88df6b1116cef3dff3bb1426f358c5b
| 202
|
py
|
Python
|
entities/application.py
|
fic2/python-dokuwiki-export
|
3584c4cd146e1d8510504064c8c8094e41a5fc9e
|
[
"MIT"
] | null | null | null |
entities/application.py
|
fic2/python-dokuwiki-export
|
3584c4cd146e1d8510504064c8c8094e41a5fc9e
|
[
"MIT"
] | null | null | null |
entities/application.py
|
fic2/python-dokuwiki-export
|
3584c4cd146e1d8510504064c8c8094e41a5fc9e
|
[
"MIT"
] | null | null | null |
from . import NamedEntity
class Application(NamedEntity):
def __init__(self, name, provider):
NamedEntity.__init__(self, name)
self.provider = provider
def get_descendants(self):
return []
| 16.833333
| 36
| 0.742574
|
8b3e140224a7116296f99c7d4571a3e1076e6401
| 7,215
|
py
|
Python
|
doc/conf.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | 1
|
2019-06-12T19:24:42.000Z
|
2019-06-12T19:24:42.000Z
|
doc/conf.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | null | null | null |
doc/conf.py
|
antoniotrento/modoboa
|
98eea782a080a3cdfea5abea7d288ff3d49595c6
|
[
"ISC"
] | 1
|
2020-11-20T00:25:23.000Z
|
2020-11-20T00:25:23.000Z
|
# -*- coding: utf-8 -*-
#
# Modoboa documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 3 22:29:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Modoboa'
copyright = u'2017, Antoine Nguyen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.9'
# The full version, including alpha/beta/rc tags.
release = '1.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Modoboadoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Modoboa.tex', u'Modoboa Documentation',
u'Antoine Nguyen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'modoboa', u'Modoboa Documentation',
[u'Antoine Nguyen'], 1)
]
intersphinx_mapping = {
'amavis': ('http://modoboa-amavis.readthedocs.org/en/latest/', None)
}
| 31.644737
| 80
| 0.716286
|
9b0aa9a957b4908d82d64571a4d33700be4d2f82
| 744
|
py
|
Python
|
contentcuration/contentcuration/test_settings.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | null | null | null |
contentcuration/contentcuration/test_settings.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | null | null | null |
contentcuration/contentcuration/test_settings.py
|
Tlazypanda/studio
|
cd1c2f169c705027cdd808cbbcae907d0a9b21d2
|
[
"MIT"
] | null | null | null |
import logging
from .settings import * # noqa
DEBUG = True
if RUNNING_TESTS:
    # If we're running tests, run Celery tasks synchronously so that tests
    # don't finish before their queued tasks have completed.
CELERY_TASK_ALWAYS_EAGER = True
ALLOWED_HOSTS = ["studio.local", "192.168.31.9", "127.0.0.1", "*"]
ACCOUNT_ACTIVATION_DAYS = 7
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 2
logging.basicConfig(level='INFO')
# Allow the debug() context processor to add variables to template context.
# Include here the IPs from which a local dev server might be accessed. See
# https://docs.djangoproject.com/en/2.0/ref/settings/#internal-ips
INTERNAL_IPS = (
'127.0.0.1',
'studio.local',
'192.168.31.9',
)
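# Minimal standalone sketch (not part of these settings) of what the eager
# flag above buys us: with task_always_eager enabled, .delay() executes the
# task inline and returns immediately, so no Celery worker is needed during
# tests. The Celery app and the `add` task below are hypothetical.
def _celery_eager_demo():
    from celery import Celery
    app = Celery('demo')
    app.conf.task_always_eager = True
    @app.task
    def add(x, y):
        return x + y
    return add.delay(2, 3).get()  # returns 5 without a worker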
| 27.555556
| 103
| 0.721774
|
f04a2c0cd5fc8c26a002f7dee8363477dc27f4ce
| 22,021
|
py
|
Python
|
vespid/models/clustering.py
|
QS-2/VESPID
|
f7d27f0c4aa99229d12d90fce9a52a48339e0a59
|
[
"Apache-2.0"
] | null | null | null |
vespid/models/clustering.py
|
QS-2/VESPID
|
f7d27f0c4aa99229d12d90fce9a52a48339e0a59
|
[
"Apache-2.0"
] | null | null | null |
vespid/models/clustering.py
|
QS-2/VESPID
|
f7d27f0c4aa99229d12d90fce9a52a48339e0a59
|
[
"Apache-2.0"
] | null | null | null |
import hdbscan
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
import pickle
from tqdm import tqdm
import umap
import optuna
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import mlflow
from mlflow.tracking import MlflowClient
from vespid.models.mlflow_tools import load_model as load_mlflow_model
import cloudpickle
from smart_open import open
from vespid.features.interdisciplinarity import calculate_interdisciplinarity_score
from vespid import setup_logger
logger = setup_logger(module_name=__name__)
RANDOM_STATE = 42 # for reproducibility
class HdbscanEstimator(hdbscan.HDBSCAN, mlflow.pyfunc.PythonModel):
'''
Simple wrapper class for HDBSCAN that allows us to
add some scikit-learn-like scoring functionality
for doing parameter tuning with sklearn tools easily.
'''
# Note that __init__ should not be overwritten as it is difficult to abide
# by the sklearn introspection techniques hdbscan.HDBSCAN uses under the
# hood when doing so
def fit(self, X, y=None, soft_cluster=False):
'''
Modified version of the original hdbscan fit method from
https://github.com/scikit-learn-contrib/hdbscan/blob/2179c24a31742aab459c75ac4823acad2dca38cf/hdbscan/hdbscan_.py#L1133.
This version allows for soft clustering to be done at the same time as
the main clustering task, for efficiency's sake.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
y : ignored
soft_cluster : bool, optional
Indicates if soft clustering should occur along with fitting
via the main algorithm, by default False
'''
super().fit(X)
self.n_features_ = X.shape[1]
self._is_fit = True
if soft_cluster:
logger.debug("Fitting secondary soft clustering model...")
self.soft_cluster(X, train_model=True)
return self
def soft_cluster(self, data=None, train_model=False, use_original_method=False):
'''
Performs a soft-clustering with the given trained
model to generate probabilities of membership
for each datum relative to all clusters in the solution
of the trained model. Serves as a way to determine
the strength of membership of a given datum to every
cluster in the solution.
Parameters
----------
data: numpy array or pandas DataFrame. Data that you want
to assess for cluster membership. Note that, if this is
run post-clustering, the cluster
solution will *not* be re-solved, but rather the new
points will simply be assessed relative to the
existing solution. Should be of shape
(n_samples, n_features_original),
where n_features_original refers to the feature
dimensionality present in the training data.
If None, results will be returned for the training
data and assigned as the attribute self.soft_clusters.
train_model: bool. If True, assumes you don't want to use the
existing trained soft clustering model (if there is one). Ignored
if self.soft_cluster_model is None or `use_original_method=True`.
use_original_method: bool. Indicates if you want to use the
distance- and noise-based approach to soft clustering from the
hdbscan library (True) or the new approach using a secondary model
predicting probabilities of class membership (False).
Note that, with `use_original_method=True`, the probabilities
for each sample are not guaranteed to sum to 1.0.
Returns
-------
pandas DataFrame that is of shape (n_samples, n_clusters).
'''
if not self.prediction_data and use_original_method:
raise ValueError("Model didn't set `prediction_data=True` "
"during training so we probably can't get soft "
"cluster probabilities...")
if use_original_method:
self.soft_cluster_model = None
if data is None:
soft_clusters = hdbscan.all_points_membership_vectors(self)
else:
soft_clusters = hdbscan.prediction.membership_vector(
self, data)
else:
num_labels = len(np.unique(self.labels_))
if num_labels <= 1:
logger.warning(f"Clustering resulted in only {num_labels} "
"labels, which means soft clustering won't "
"work!")
raise optuna.TrialPruned()
elif train_model or not hasattr(self, 'soft_cluster_model'):
if isinstance(data, pd.DataFrame):
data = data.values
# Make data into DataFrame to facilitate tracking shuffled samples
df = pd.DataFrame(
data,
columns=[f'x_{i}' for i in range(data.shape[1])]
)
feature_columns = df.columns
df['label'] = self.labels_
df_no_noise = df[df['label'] > -1]
if len(df_no_noise) == 0:
raise RuntimeError("df_no_noise is length zero")
X_train, X_test, y_train, y_test = train_test_split(
df_no_noise[feature_columns],
df_no_noise['label'],
train_size=0.6,
shuffle=True,
stratify=df_no_noise['label'],
random_state=RANDOM_STATE
)
scaler = StandardScaler()
logreg = LogisticRegressionCV(
Cs=25,
                    cv=5,  # 5-fold CV on the training split (each fold holds out 20%)
scoring='f1_micro',
multi_class='multinomial',
max_iter=100_000,
random_state=RANDOM_STATE
)
self.soft_cluster_model = Pipeline([
('scaler', scaler),
('logreg', logreg)
])
self.soft_cluster_model.fit(X_train, y_train)
soft_clusters = self.soft_cluster_model.predict_proba(data)
# Put everything into one view, labels and probabilities
df = pd.DataFrame(
soft_clusters,
columns=[f"prob_cluster{n}" for n in range(soft_clusters.shape[1])]
)
# Save for later reference and usage
if train_model:
self.soft_cluster_probabilities = df
self._test_soft_clustering()
return df
def predict_proba(self, X=None):
'''
Generates probability vectors for all points provided,
indicating each point's probability of belonging to one of the
clusters identified during fitting.
Similar to self.soft_cluster(), but this works in pure inference mode
and assumes no knowledge of the cluster labels (as this can be used
for "clustering" new points without re-fitting the entire algorithm).
Parameters
----------
X : pandas DataFrame, optional
The input features for soft clustering. Should be of shape
self.n_features_. If None, will just return the pre-computed
probability vectors for the training data, by default None
Returns
-------
pandas DataFrame with columns "prob_cluster{i}"
Membership vectors (one cluster per column) for each data point.
Raises
------
RuntimeError
If no data are provided but no soft clustering results are
available either.
'''
if X is None and hasattr(self, 'soft_cluster_probabilities'):
return self.soft_cluster_probabilities
elif not hasattr(self, 'soft_cluster_probabilities'):
raise RuntimeError("No soft clustering model available")
elif isinstance(X, np.ndarray):
X = pd.DataFrame(X)
return pd.DataFrame(
self.soft_cluster_model.predict_proba(X),
columns=self.soft_cluster_probabilities.columns,
index=X.index
)
def predict(self, context, X=None):
'''
Provides cluster labels for all points provided. If no data are
provided, simply returns the labels for the training data. Uses the
pre-trained soft clustering model to find the cluster for each point
with the highest predicted probability and returns that label.
Parameters
----------
context : mlflow.pyfunc.PythonModelContext object
Provides contextual information (usually data from mlflow
artifact store needed to do predictions). Can be None and often is.
X : pandas DataFrame, optional
Input features. X.shape[1] == self.n_features_. If None,
pre-computed labels for all training data will be returned,
by default None
Returns
-------
pandas DataFrame with an index matching `X.index`
Cluster labels for each point.
'''
if X is None:
logger.info("No data provided for inference so returning "
"pre-calcluated labels")
return self.labels_
if isinstance(X, np.ndarray):
X = pd.DataFrame(X)
# Use the soft clustering model to predict new data cluster membership
return pd.DataFrame(
self.predict_proba(X).values.argmax(axis=1),
index=X.index
)
# def load_context(self, context):
# '''Used for loading data to be used at prediction time mainly'''
# with open(context.artifacts["hdb_model"], 'rb') as f:
# self = cloudpickle.load(f)
def info(self):
'''
Prints to the log a bunch of useful metrics about
the cluster solution, like DBCV, mean persistence,
etc.
'''
num_noise_points = len(self.labels_[self.labels_ == -1])
num_all_points = len(self.labels_)
pct_noise_points = round(num_noise_points / num_all_points * 100, 2)
num_clusters = pd.Series(self.labels_[self.labels_ > -1]).nunique()
persistence = self.cluster_persistence_
soft_model_presence = 'is' if hasattr(
self, 'soft_cluster_model') else 'is not'
logger.info(f"Number of clusters in this solution is {num_clusters} and "
f"there are {num_noise_points} ({pct_noise_points})% noise points. "
f"DBCV score is {self.relative_validity_}. "
"Additionally, there is a mean cluster persistence of "
f"{persistence.mean()} and a standard deviation of {persistence.std()}. "
f"Average cluster membership probability is {self.probabilities_.mean()} "
f"and average outlier score is {self.outlier_scores_.mean()}. "
f'Soft cluster model {soft_model_presence} present')
def score(self, X=None, y=None):
'''
Provides the DBCV value of the clustering solution found
after fitting.
Parameters
----------
X: ignored, as it's passed during the fit() phase.
y: ignored, as this is unsupervised learning!
Returns
-------
Dict of various trained-model-specific scores/stats.
'''
# Give some useful info for reporting during tuning
self.info()
num_clusters = pd.Series(self.labels_[self.labels_ > -1]).nunique()
num_noise_points = len(self.labels_[self.labels_ == -1])
scores = {
'DBCV': self.relative_validity_,
'num_clusters': num_clusters,
'num_noise_points': num_noise_points,
'persistence_mean': self.cluster_persistence_.mean(),
'persistence_std': self.cluster_persistence_.std(),
'probability_mean': self.probabilities_.mean(),
'probability_std': self.probabilities_.std(),
'outlier_score_mean': self.outlier_scores_.mean(),
'outlier_score_std': self.outlier_scores_.std(),
}
return scores
# @classmethod
# def load_model(cls, filepath):
# '''
# Loads a trained HDBSCAN model, checks it for some valuable
# attributes we'll need downstream, and spits out the trained
# model object as well as a DataFrame with useful data.
# Parameters
# ----------
# filepath: str. Path to pickled HDBSCAN object.
# Returns
# -------
# HdbscanEstimator object with DataFrame
# self.soft_cluster_probabilities included, showing the probability of each
# datum belonging to each possible cluster in this model's solution.
# '''
# with open(filepath, 'rb') as f:
# model = pickle.load(f)
# # Check that model has everything we want to play around with
# target_attributes = [
# 'cluster_persistence_',
# 'probabilities_',
# 'outlier_scores_',
# 'exemplars_'
# ]
# model_contents = dir(model)
# for a in target_attributes:
# if a not in model_contents:
# raise ValueError(f"We're missing the attribute '{a}'!!")
# num_clusters = np.unique(model.labels_).shape[0] # includes noise
# logger.info(f"This HDBSCAN solution has {num_clusters} clusters, "
# "including a noise cluster, if noise is present")
# # For backwards compatibility
# # Check if model has each method; if not, bind the method to it for use
# if 'soft_cluster' not in model_contents or model.soft_cluster is None:
# model.soft_cluster = cls.soft_cluster.__get__(model)
# if 'num_clusters_' not in model_contents:
# model.num_clusters_ = len([l for l in np.unique(model.labels_) if l != -1])
# model.soft_cluster()
# return model
@classmethod
def load_model(
cls,
run_id=None,
model_name=None,
model_version=None
):
'''
Loads an mlflow-logged model from its serialized (AKA pickled) form.
NOTE: this is not expected to work unless you are loading it into an
identical environment as the one described by the model's logged
conda.yaml file. If this isn't possible, consider loading it via
the `mlflow.pyfunc.load_model()` functionality described in the UI
page for the model. This will, however, not provide you direct access
to all of the original model attributes/methods/etc., whereas this
method will.
Parameters
----------
run_id : str, optional
Unique ID of a model training run. May only be used if `model_name`
and `model_version` are not used, by default None
model_name : str, optional
Unique name of a registered model. May only be used if `run_id` is
not used, by default None
model_version : int, optional
Version number of the registered model identified by `model_name`.
May not be used if `run_id` is used, by default None
model_stage : str, optional
Model stage of registered model, if there is one. Allowed values
can be found in vespid.models.mlflow_tools.MODEL_STAGES,
by default None
Returns
-------
HdbscanEstimator object
The logged model object in its original state as of the logging
event.
Raises
------
ValueError
Errors out if a run ID is specified at the same time as registered
model information
'''
# TODO: complete URI generation code in vespid.models.mlflow to handle run_ids, registered model names, etc.
return load_mlflow_model(run_id=run_id)
        # NOTE: the branch below is unreachable until the TODO above is
        # resolved (it also references an MlflowClient instance that is never
        # constructed in this method), so it is kept commented out for
        # reference only.
        # if run_id is not None:
        #     if model_name is not None or model_version is not None:
        #         raise ValueError("`run_id` must be the only parameter "
        #                          "specified if it is not None")
        #     model_uri = client.get_run(run_id).info.artifact_uri + '/model'
        # elif model_name is not None:
        #     if run_id is not None:
        #         raise ValueError("`run_id` and `model_name` cannot both be "
        #                          "specified")
        #     model_uri = client.get_model_version_download_uri(
        #         model_name,
        #         model_version
        #     )
def _test_soft_clustering(self):
if isinstance(self.soft_cluster_probabilities, pd.DataFrame):
membership_vectors = self.soft_cluster_probabilities.values
else:
membership_vectors = self.soft_cluster_probabilities
# Check the membership_vectors shape
# Should be (num_points, num_clusters)
vector_shape = membership_vectors.shape
if -1 in self.labels_:
num_clusters = len(np.unique(self.labels_)) - \
1 # account for noise
else:
num_clusters = len(np.unique(self.labels_))
assert len(vector_shape) == 2, f"membership_vectors should be 2-d, got {len(vector_shape)} dimensions"
assertion_str = f"membership_vectors should be of shape " + \
f"(num_points, num_clusters), but got {vector_shape}"
assert vector_shape[1] == num_clusters, assertion_str
        # Each vector should sum to a value in (0.0, 1.0]; noise can pull the
        # sum below 1.0, and a little slack is left for floating point error.
        vector_sums = membership_vectors.sum(axis=1)
        valid_fraction = np.logical_and(
            vector_sums < 1.0000001, vector_sums > 0
        ).sum() / membership_vectors.shape[0]
        assert valid_fraction == 1, (
            "Some probability vectors sum to 0.0 or less, or to more than 1.0")
df_clusters = pd.DataFrame({
'label': self.labels_,
'probability': self.probabilities_,
'max_soft_probability': membership_vectors.max(axis=1),
'max_soft_p_label': membership_vectors.argmax(axis=1)
})
        # Does the max soft cluster probability match the original cluster assignment?
df_clusters = df_clusters[df_clusters['label'] > -1]
labels_match_rate = (
df_clusters['label'] == df_clusters['max_soft_p_label']
).sum() / len(df_clusters)
labels_match_pct = round(labels_match_rate * 100, 2)
assertion_str = f"Max soft cluster assignment only " + \
f"matches main algorithm assignment for non-noise points " + \
f"{labels_match_pct}% of the time"
if labels_match_rate < 0.9:
assert labels_match_rate == 1, assertion_str
else:
logger.info("Soft-clustering-derived labels and main "
f"algorithm labels agree {labels_match_pct}% "
"of the time.")
return labels_match_rate
def build_cluster_pipeline(
umap_n_components=100,
umap_n_neighbors=30,
umap_min_dist=0.0,
umap_metric='euclidean',
hdbscan_min_samples=25,
hdbscan_min_cluster_size=75,
hdbscan_cluster_selection_method='eom'
):
'''
Given a set of hyperparameters,
builds an un-trained UMAP + HdbscanEstimator pipeline
that can be trained and then used for other downstream tasks.
Parameters
----------
umap_n_components: int. Desired dimensionality of the output
UMAP embedding. Must be smaller than embeddings.shape[1].
umap_n_neighbors: int. Used in determining the balance
between local (low values) and global (high values)
structure in the UMAP reduced-dimensionality embedding.
umap_min_dist: float in the range [0.0, 0.99]. Controls how tightly
UMAP is allowed to pack points together, with low values
causing "clumpier" embeddings and thus being useful for
clustering.
umap_metric: str. Indicates the distance metric to
be used when generating UMAP embeddings.
hdbscan_min_samples: int. Number of neighbors required by HDBSCAN
for a point to be considered a "core point". Typically
higher values lead to more points labeled as noise
and denser + fewer clusters.
hdbscan_min_cluster_size: int. Number of points required for
HDBSCAN to consider a cluster valid.
hdbscan_cluster_selection_method: str. Type of method to use for
determining final flat cluster structure. Options are ['eom', 'leaf'].
Returns
-------
sklearn.Pipeline object with a UMAP component
(accessed via pipe.named_steps['umap']) and an
HdbscanEstimator component (accessed via
pipe.named_steps['hdbscan']).
'''
um = umap.UMAP(
n_jobs=1,
n_neighbors=umap_n_neighbors,
min_dist=umap_min_dist,
metric=umap_metric,
n_components=umap_n_components
)
hdb = HdbscanEstimator(
gen_min_span_tree=True,
prediction_data=False,
algorithm='boruvka_kdtree',
core_dist_n_jobs=1,
min_cluster_size=hdbscan_min_cluster_size,
min_samples=hdbscan_min_samples,
cluster_selection_method=hdbscan_cluster_selection_method
)
return Pipeline(steps=[('umap', um), ('hdbscan', hdb)])
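# Illustrative usage sketch (not part of the original module): build the
# UMAP + HdbscanEstimator pipeline, fit it on a small synthetic dataset with
# obvious cluster structure, and pull out the scores and soft-cluster
# probabilities. All sizes and hyperparameters below are arbitrary choices
# made only so the sketch runs quickly.
def _example_pipeline_usage():
    rng = np.random.default_rng(RANDOM_STATE)
    X = np.vstack([
        rng.normal(loc=-8.0, size=(200, 16)),
        rng.normal(loc=0.0, size=(200, 16)),
        rng.normal(loc=8.0, size=(200, 16)),
    ])
    pipe = build_cluster_pipeline(
        umap_n_components=5,
        umap_n_neighbors=15,
        hdbscan_min_samples=5,
        hdbscan_min_cluster_size=25
    )
    # Fit params prefixed with the step name are routed to HdbscanEstimator.fit
    pipe.fit(X, hdbscan__soft_cluster=True)
    hdb = pipe.named_steps['hdbscan']
    return hdb.score(), hdb.predict_proba()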
| 38.70123
| 128
| 0.618092
|
66dd17110384ff6e4810c07a02b8c854336173c5
| 7,470
|
py
|
Python
|
tests/test_command_parse.py
|
jesuslosada/scrapy
|
8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b
|
[
"BSD-3-Clause"
] | 2
|
2018-03-29T08:26:17.000Z
|
2019-06-17T10:56:19.000Z
|
tests/test_command_parse.py
|
jesuslosada/scrapy
|
8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b
|
[
"BSD-3-Clause"
] | 1
|
2016-10-20T13:15:22.000Z
|
2016-11-21T09:21:26.000Z
|
tests/test_command_parse.py
|
jesuslosada/scrapy
|
8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b
|
[
"BSD-3-Clause"
] | 3
|
2017-10-11T01:07:20.000Z
|
2019-10-07T02:01:11.000Z
|
from os.path import join, abspath
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
from scrapy.utils.python import to_native_str
from tests.test_commands import CommandTest
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
command = 'parse'
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class MySpider(scrapy.Spider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.logger.debug('It Works!')
return [scrapy.Item(), dict(foo='bar')]
def parse_request_with_meta(self, response):
foo = response.meta.get('foo', 'bar')
if foo == 'bar':
self.logger.debug('It Does Not Work :(')
else:
self.logger.debug('It Works!')
def parse_request_without_meta(self, response):
foo = response.meta.get('foo', 'bar')
if foo == 'bar':
self.logger.debug('It Works!')
else:
self.logger.debug('It Does Not Work :(')
class MyGoodCrawlSpider(CrawlSpider):
name = 'goodcrawl{0}'
rules = (
Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),
Rule(LinkExtractor(allow=r'/text'), follow=True),
)
def parse_item(self, response):
return [scrapy.Item(), dict(foo='bar')]
def parse(self, response):
return [scrapy.Item(), dict(nomatch='default')]
class MyBadCrawlSpider(CrawlSpider):
'''Spider which doesn't define a parse_item callback while using it in a rule.'''
name = 'badcrawl{0}'
rules = (
Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),
)
def parse(self, response):
return [scrapy.Item(), dict(foo='bar')]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
import logging
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
logging.info('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
@defer.inlineCallbacks
def test_spider_arguments(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-a', 'test_arg=1',
'-c', 'parse',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_request_with_meta(self):
raw_json_string = '{"foo" : "baz"}'
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--meta', raw_json_string,
'-c', 'parse_request_with_meta',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-m', raw_json_string,
'-c', 'parse_request_with_meta',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_request_without_meta(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-c', 'parse_request_without_meta',
self.url('/html')])
self.assertIn("DEBUG: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_pipelines(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--pipelines',
'-c', 'parse',
self.url('/html')])
self.assertIn("INFO: It Works!", to_native_str(stderr))
@defer.inlineCallbacks
def test_parse_items(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-c', 'parse', self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out))
@defer.inlineCallbacks
def test_parse_items_no_callback_passed(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out))
@defer.inlineCallbacks
def test_wrong_callback_passed(self):
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-c', 'dummy', self.url('/html')]
)
self.assertRegexpMatches(to_native_str(out), """# Scraped Items -+\n\[\]""")
self.assertIn("""Cannot find callback""", to_native_str(stderr))
@defer.inlineCallbacks
def test_crawlspider_matching_rule_callback_set(self):
"""If a rule matches the URL, use it's defined callback."""
status, out, stderr = yield self.execute(
['--spider', 'goodcrawl'+self.spider_name, '-r', self.url('/html')]
)
self.assertIn("""[{}, {'foo': 'bar'}]""", to_native_str(out))
@defer.inlineCallbacks
def test_crawlspider_matching_rule_default_callback(self):
"""If a rule match but it has no callback set, use the 'parse' callback."""
status, out, stderr = yield self.execute(
['--spider', 'goodcrawl'+self.spider_name, '-r', self.url('/text')]
)
self.assertIn("""[{}, {'nomatch': 'default'}]""", to_native_str(out))
@defer.inlineCallbacks
def test_spider_with_no_rules_attribute(self):
"""Using -r with a spider with no rule should not produce items."""
status, out, stderr = yield self.execute(
['--spider', self.spider_name, '-r', self.url('/html')]
)
self.assertRegexpMatches(to_native_str(out), """# Scraped Items -+\n\[\]""")
self.assertIn("""No CrawlSpider rules found""", to_native_str(stderr))
@defer.inlineCallbacks
def test_crawlspider_missing_callback(self):
status, out, stderr = yield self.execute(
['--spider', 'badcrawl'+self.spider_name, '-r', self.url('/html')]
)
self.assertRegexpMatches(to_native_str(out), """# Scraped Items -+\n\[\]""")
@defer.inlineCallbacks
def test_crawlspider_no_matching_rule(self):
"""The requested URL has no matching rule, so no items should be scraped"""
status, out, stderr = yield self.execute(
['--spider', 'badcrawl'+self.spider_name, '-r', self.url('/enc-gb18030')]
)
self.assertRegexpMatches(to_native_str(out), """# Scraped Items -+\n\[\]""")
self.assertIn("""Cannot find a rule that matches""", to_native_str(stderr))
| 38.112245
| 85
| 0.575234
|
b0f391074ee0a8fad9af9ef39a3027250a8fc68c
| 7,358
|
py
|
Python
|
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_CoordinationSignal.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | 1
|
2019-05-13T00:40:11.000Z
|
2019-05-13T00:40:11.000Z
|
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_CoordinationSignal.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/devel/lib/python2.7/dist-packages/duckietown_msgs/msg/_CoordinationSignal.py
|
bendychua/final
|
35fd0477ec5950479f0e082a65db2aa05a92db82
|
[
"CC-BY-2.0"
] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from duckietown_msgs/CoordinationSignal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class CoordinationSignal(genpy.Message):
_md5sum = "38d01c6c7727f64c628b7afcb3e7ccd2"
_type = "duckietown_msgs/CoordinationSignal"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
string signal
# these must match with LED_protocol.yaml
string OFF=light_off
string SIGNAL_A=CAR_SIGNAL_A
string SIGNAL_B=CAR_SIGNAL_B
string SIGNAL_C=CAR_SIGNAL_C
string TL_GO_ALL=tl_go_all
string TL_STOP_ALL=tl_stop_all
string TL_GO_N=tl_go_N
string TL_GO_S=tl_go_S
string TL_GO_W=tl_go_W
string TL_GO_E=tl_go_E
string TL_YIELD=tl_yield
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
# Pseudo-constants
OFF = 'light_off'
SIGNAL_A = 'CAR_SIGNAL_A'
SIGNAL_B = 'CAR_SIGNAL_B'
SIGNAL_C = 'CAR_SIGNAL_C'
TL_GO_ALL = 'tl_go_all'
TL_STOP_ALL = 'tl_stop_all'
TL_GO_N = 'tl_go_N'
TL_GO_S = 'tl_go_S'
TL_GO_W = 'tl_go_W'
TL_GO_E = 'tl_go_E'
TL_YIELD = 'tl_yield'
__slots__ = ['header','signal']
_slot_types = ['std_msgs/Header','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,signal
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(CoordinationSignal, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.signal is None:
self.signal = ''
else:
self.header = std_msgs.msg.Header()
self.signal = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.signal
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.signal = str[start:end].decode('utf-8')
else:
self.signal = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.signal
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.signal = str[start:end].decode('utf-8')
else:
self.signal = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
| 31.991304
| 123
| 0.639032
|
af6b721f354894bf72177f73e581408e08ee4d78
| 10,403
|
py
|
Python
|
horovod/run/common/util/network.py
|
tirkarthi/horovod
|
957fe7418b1222ee8ad2698c7352a2629d4b5eb0
|
[
"Apache-2.0"
] | 1
|
2020-04-02T11:52:05.000Z
|
2020-04-02T11:52:05.000Z
|
horovod/run/common/util/network.py
|
marload/horovod
|
16297493ac5fe8b0dbb303b9cbf66d8178b727e2
|
[
"Apache-2.0"
] | null | null | null |
horovod/run/common/util/network.py
|
marload/horovod
|
16297493ac5fe8b0dbb303b9cbf66d8178b727e2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import socket
import struct
import threading
import cloudpickle
import psutil
from six.moves import queue, socketserver
from horovod.run.common.util import secret
from horovod.run.util.network import find_port
class PingRequest(object):
pass
class NoValidAddressesFound(Exception):
pass
class PingResponse(object):
def __init__(self, service_name, source_address):
self.service_name = service_name
"""Service name that responded to this ping."""
self.source_address = source_address
"""Source IP address that was visible to the service."""
class AckResponse(object):
"""Used for situations when the response does not carry any data."""
pass
class Wire(object):
"""
Used for serialization/deserialization of objects over the wire.
We use HMAC to protect services from unauthorized use. The key used for
the HMAC digest is distributed by Open MPI and Spark.
The objects are serialized using cloudpickle. Serialized objects become
the body of the message.
Structure of the message is as follows:
- HMAC digest of the body (32 bytes)
- length of the body (4 bytes)
- body
"""
def __init__(self, key):
self._key = key
def write(self, obj, wfile):
message = cloudpickle.dumps(obj)
digest = secret.compute_digest(self._key, message)
wfile.write(digest)
# Pack message length into 4-byte integer.
wfile.write(struct.pack('i', len(message)))
wfile.write(message)
wfile.flush()
def read(self, rfile):
digest = rfile.read(secret.DIGEST_LENGTH)
# Unpack message length into 4-byte integer.
message_len = struct.unpack('i', rfile.read(4))[0]
message = rfile.read(message_len)
if not secret.check_digest(self._key, message, digest):
raise Exception('Security error: digest did not match the message.')
return cloudpickle.loads(message)
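# Illustrative sketch (not part of the original module): the framing described
# in the Wire docstring (HMAC digest, 4-byte length, body) can be exercised
# round-trip against an in-memory buffer. The key below is a placeholder; in
# real use it is the secret distributed by Open MPI or Spark.
def _example_wire_roundtrip():
    import io
    key = b'not-a-real-secret'
    wire = Wire(key)
    buff = io.BytesIO()
    wire.write({'hello': 'world'}, buff)
    buff.seek(0)
    return wire.read(buff)  # -> {'hello': 'world'}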
class BasicService(object):
def __init__(self, service_name, key, nics):
self._service_name = service_name
self._wire = Wire(key)
self._nics = nics
self._server, _ = find_port(
lambda addr: socketserver.ThreadingTCPServer(
addr, self._make_handler()))
self._port = self._server.socket.getsockname()[1]
self._addresses = self._get_local_addresses()
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
def _make_handler(self):
server = self
class _Handler(socketserver.StreamRequestHandler):
def handle(self):
try:
req = server._wire.read(self.rfile)
resp = server._handle(req, self.client_address)
if not resp:
raise Exception('Handler did not return a response.')
server._wire.write(resp, self.wfile)
except EOFError:
# Happens when client is abruptly terminated, don't want to pollute the logs.
pass
return _Handler
def _handle(self, req, client_address):
if isinstance(req, PingRequest):
return PingResponse(self._service_name, client_address[0])
raise NotImplementedError(req)
def _get_local_addresses(self):
result = {}
for intf, intf_addresses in psutil.net_if_addrs().items():
if self._nics and intf not in self._nics:
continue
for addr in intf_addresses:
if addr.family == socket.AF_INET:
if intf not in result:
result[intf] = []
result[intf].append((addr.address, self._port))
if not result and self._nics:
raise NoValidAddressesFound(
'No available network interface found matching user provided interface: {}'.format(self._nics))
return result
def addresses(self):
return self._addresses
def shutdown(self):
self._server.shutdown()
self._server.server_close()
self._thread.join()
def get_port(self):
return self._port
class BasicClient(object):
def __init__(self, service_name, addresses, key, verbose, match_intf=False,
probe_timeout=20, attempts=3):
# Note: because of retry logic, ALL RPC calls are REQUIRED to be idempotent.
self._verbose = verbose
self._service_name = service_name
self._wire = Wire(key)
self._match_intf = match_intf
self._probe_timeout = probe_timeout
self._attempts = attempts
self._addresses = self._probe(addresses)
if not self._addresses:
raise NoValidAddressesFound(
'Horovod was unable to connect to {service_name} on any '
'of the following addresses: {addresses}.\n\n'
'One possible cause of this problem is that '
'horovod currently requires every host to have at '
'least one routable network interface with the same '
'name across all of the hosts. '
'You can run \"ifconfig -a\" '
'on every host and check for the common '
'routable interface. '
'To fix the problem, you can rename interfaces on '
'Linux.'.format(service_name=service_name, addresses=addresses))
def _probe(self, addresses):
result_queue = queue.Queue()
threads = []
for intf, intf_addresses in addresses.items():
for addr in intf_addresses:
thread = threading.Thread(target=self._probe_one,
args=(intf, addr, result_queue))
thread.daemon = True
thread.start()
threads.append(thread)
for t in threads:
t.join()
result = {}
while not result_queue.empty():
intf, addr = result_queue.get()
if intf not in result:
result[intf] = []
result[intf].append(addr)
return result
def _probe_one(self, intf, addr, result_queue):
for iter in range(self._attempts):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self._probe_timeout)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(PingRequest(), wfile)
resp = self._wire.read(rfile)
if resp.service_name != self._service_name:
return
if self._match_intf:
# Interface name of destination and source must match
# since `match_intf` is requested.
client_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(intf, [])
if x.family == socket.AF_INET]
if resp.source_address not in client_intf_addrs:
if self._verbose >= 2:
# Need to find the local interface name whose
# address was visible to the target host's server.
resp_intf = ''
for key in psutil.net_if_addrs().keys():
key_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(key, [])]
if resp.source_address in key_intf_addrs:
resp_intf = key
break
print('WARNING: Expected to connect the host '
'{addr} using interface '
'{intf}, but reached it on interface '
'{resp_intf}.'.format(
addr=str(addr[0])+':'+str(addr[1]),
intf=intf,
resp_intf=resp_intf))
return
result_queue.put((intf, addr))
return
finally:
rfile.close()
wfile.close()
except:
pass
finally:
sock.close()
def _send_one(self, addr, req):
for iter in range(self._attempts):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(req, wfile)
resp = self._wire.read(rfile)
return resp
finally:
rfile.close()
wfile.close()
except:
if iter == self._attempts - 1:
# Raise exception on the last retry.
raise
finally:
sock.close()
def _send(self, req):
# Since all the addresses were vetted, use the first one.
addr = list(self._addresses.values())[0][0]
return self._send_one(addr, req)
def addresses(self):
return self._addresses
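# Illustrative loopback sketch (not part of the original module): start a
# BasicService and let a BasicClient probe it over the local interfaces. The
# key is a placeholder, but both sides must share the same value or the HMAC
# check in Wire.read() will reject the ping.
def _example_service_and_client():
    key = b'not-a-real-secret'
    service = BasicService('demo service', key, nics=None)
    try:
        client = BasicClient('demo service', service.addresses(), key,
                             verbose=0)
        return client.addresses()
    finally:
        service.shutdown()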
| 38.106227
| 111
| 0.548496
|
9ff2ad7e59ce81e8ad8a8369bbf4ce054a667d37
| 2,580
|
py
|
Python
|
vppy/hough/_hough_lines_kde.py
|
EvDuijnhoven/vppy
|
3678e3438730ee56d13f423c7b0307f96afaac42
|
[
"MIT"
] | null | null | null |
vppy/hough/_hough_lines_kde.py
|
EvDuijnhoven/vppy
|
3678e3438730ee56d13f423c7b0307f96afaac42
|
[
"MIT"
] | null | null | null |
vppy/hough/_hough_lines_kde.py
|
EvDuijnhoven/vppy
|
3678e3438730ee56d13f423c7b0307f96afaac42
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.base import BaseEstimator
from ._hough_line import HoughLine
from typing import List, Tuple
class HoughLinesKDE(BaseEstimator):
"""Computes a KDE (Kernel Density Estimate) for a list of HoughLines"""
def __init__(self, shape, beta=5, kde_width=200, kde_height=200):
self.shape = shape
self.beta = beta
self.kde_width = kde_width
self.kde_height = kde_height
def fit(self, X: List[HoughLine], y=None):
# Define the bandwidth relative to the image size
bandwidth = 100 * self.beta / max(self.shape)
img_height, img_width = self.shape
kde = np.zeros((self.kde_height, self.kde_width), dtype=np.float32)
# For every HoughLine compute and add its kernel to the total kde
for line in X:
distance = self._get_distance_matrix(line, bandwidth, img_width, img_height)
# Calculate the kernel over the distance matrix
kernel = self._epanechnikov_matrix(distance)
# Add the weighted value of the kernel to the total kde.
kde = np.add(kde, kernel * line.weight)
# Get the max index of the kde as the vanishing point
y, x = np.unravel_index(kde.argmax(), kde.shape)
self.vp_ = self._scale_point(x, y, img_width, img_height)
self.kde_ = kde
return self
def _get_distance_matrix(self, line: HoughLine, bandwidth: float, img_width: int, img_height: int) -> np.ndarray:
"""Calculates the distance matrix for every point in the grid to the HoughLine"""
# Calculate the rho for every point in the matrix, defined by x cos(theta) + y sin(theta)
X = np.repeat([np.linspace(0, 1, self.kde_width) * img_width * np.cos(line.theta)], self.kde_height, axis=0)
Y = np.transpose(
np.repeat([np.linspace(0, 1, self.kde_height) * img_height * np.sin(line.theta)], self.kde_width, axis=0))
# Center every point around rho, smooth using the bandwidth.
return (np.add(X, Y) - line.rho) * bandwidth
def _scale_point(self, x, y, img_width: int, img_height: int) -> Tuple[int, int]:
"""Scales points to the size of the image"""
return int(np.round((x + 0.5) * img_width / self.kde_width)), int(
np.round((y + 0.5) * img_height / self.kde_height))
@staticmethod
def _epanechnikov_matrix(distance):
"""Epanechnikov kernel; https://en.wikipedia.org/wiki/Kernel_(statistics)#Kernel_functions_in_common_use"""
return 0.75 * (1 - np.minimum(np.square(distance), 1))
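# Illustrative sketch (not part of the original module): fit() only reads the
# .rho, .theta and .weight attributes of each line, so a hypothetical
# namedtuple stand-in for HoughLine is enough to demonstrate the KDE-based
# vanishing point estimate. The line parameters are chosen so the lines
# roughly intersect near the centre of a 640x480 image, and beta is lowered
# here purely so the narrow kernels cover several cells of the 200x200 grid.
def _example_hough_kde():
    from collections import namedtuple
    FakeLine = namedtuple('FakeLine', ['rho', 'theta', 'weight'])
    lines = [
        FakeLine(rho=320.0, theta=0.0, weight=1.0),        # x = 320
        FakeLine(rho=240.0, theta=np.pi / 2, weight=1.0),  # y = 240
        FakeLine(rho=400.0, theta=np.pi / 4, weight=1.0),  # x + y ~ 566
    ]
    estimator = HoughLinesKDE(shape=(480, 640), beta=0.5).fit(lines)
    return estimator.vp_  # (x, y) vanishing point estimate in pixels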
| 43.728814
| 118
| 0.656589
|
a09b1746539042e1a996d1fff7c0aed7db26a61d
| 2,797
|
py
|
Python
|
setup.py
|
CygnusNetworks/stomp.py
|
1599cbdcf3868989ce204c6eb4fd20448bf4f253
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
CygnusNetworks/stomp.py
|
1599cbdcf3868989ce204c6eb4fd20448bf4f253
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
CygnusNetworks/stomp.py
|
1599cbdcf3868989ce204c6eb4fd20448bf4f253
|
[
"Apache-2.0"
] | 1
|
2020-03-22T12:55:31.000Z
|
2020-03-22T12:55:31.000Z
|
#!/usr/bin/env python
from distutils.core import Command
from setuptools import setup
import logging.config
import os
import shutil
import sys
import unittest
try:
logging.config.fileConfig('stomp.log.conf')
except:
pass
# Import this after configuring logging
import stomp
class TestCommand(Command):
user_options = [('test=', 't', 'specific test to run')]
def initialize_options(self):
self.test = '*'
def finalize_options(self):
pass
def run(self):
try:
import coverage
cov = coverage.coverage()
cov.start()
except ImportError:
cov = None
suite = unittest.TestSuite()
        import stomp.test
        if self.test == '*':
            print('Running all tests')
            tests = stomp.test.__all__
        else:
            tests = self.test.split(',')
for tst in tests:
suite.addTests(unittest.TestLoader().loadTestsFromName('stomp.test.%s' % tst))
runner = unittest.TextTestRunner(verbosity=2)
res = runner.run(suite)
if len(res.errors) > 0 or len(res.failures) > 0:
sys.exit(1)
if cov:
cov.stop()
cov.save()
cov.html_report(directory='../stomppy-docs/htmlcov')
class TestPipInstallCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if os.path.exists('tmp'):
shutil.rmtree('tmp')
os.mkdir('tmp')
from virtualenvapi.manage import VirtualEnvironment
env = VirtualEnvironment('tmp/scratch')
env.install('stomp.py')
class DoxygenCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('doxygen config.dox')
def version():
s = []
for num in stomp.__version__:
s.append(str(num))
return '.'.join(s)
setup(
name='stomp.py',
version=version(),
description='Python STOMP client, supporting versions 1.0, 1.1 and 1.2 of the protocol',
license='Apache',
url='https://github.com/jasonrbriggs/stomp.py',
author='Jason R Briggs',
author_email='jasonrbriggs@gmail.com',
platforms=['any'],
packages=['stomp', 'stomp.adapter'],
cmdclass={'test': TestCommand, 'docs': DoxygenCommand, 'piptest': TestPipInstallCommand},
scripts=['./scripts/stomp'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
)
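# Illustrative invocations of the custom commands registered via cmdclass above
# (the module name after --test= is a placeholder, not an actual test name):
#   python setup.py test                  # run every module listed in stomp.test.__all__
#   python setup.py test --test=<module>  # run a single stomp.test module
#   python setup.py docs                  # run doxygen against config.dox
#   python setup.py piptest               # pip-install stomp.py into a scratch virtualenv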
| 23.905983
| 93
| 0.599571
|
3902db3f63490fedfe34f5ae153ac21db54de645
| 3,996
|
py
|
Python
|
syntax practice.py
|
whitevegagabriel/pygame-blueprint
|
33fd11c13f8fc85a006891ecd4ad617444999edd
|
[
"MIT"
] | null | null | null |
syntax practice.py
|
whitevegagabriel/pygame-blueprint
|
33fd11c13f8fc85a006891ecd4ad617444999edd
|
[
"MIT"
] | null | null | null |
syntax practice.py
|
whitevegagabriel/pygame-blueprint
|
33fd11c13f8fc85a006891ecd4ad617444999edd
|
[
"MIT"
] | null | null | null |
import os
##print('hello \t im ben')
q = 'if '
self = 'self.'
colon = ':'
isstand = 'isstanding'
isup = 'isup'
isdown = 'isdown'
isright = 'isright'
isleft = 'isleft'
checks = [isstand, isup , isdown, isright, isleft] # this should be the action or item to blit
display0 = 'win.blit('
path_basic = ''
#Edit HERE
style_folder = 'chain' + '/'
#EDIT HERE
paths_plus ='chain_helm'
paths = path_basic + style_folder + paths_plus + '/'
paths2 = path_basic +'text'+'/' + paths_plus +'.txt'
oldname = 'tile'
filetype = '.png'
x=0
files = 36
div = files / 4
pop = list(range(files))  # 0 .. files-1
#print(pop)
x = 0
pyg = "pygame.image.load('"
f = open(paths2,'w')
for char in pop:
print(x)
if x < 10:
imager = str(pop[x])
comb = paths + oldname +'00' + imager + filetype
combpy = pyg + paths + '00' + imager + filetype + "'" +")"+","
combnew = paths +'00' + imager + filetype
if x == 0:
combpy =self + paths_plus + '_up' '=' + '[' + pyg + paths + '00' + imager + filetype + "'" +")"+","
if x ==7:
combpy = pyg + paths + '00' + imager + filetype + "'" +")"+"]"
if x == 8:
combpy = self + paths_plus + '_lft' '=' + '[' + pyg + paths + '00' + imager + filetype + "'" +")"+","
x +=1
elif x >=10:
imager = str(pop[x])
combpy = pyg + paths + '0' + imager + filetype + "'"+")"+","
comb = paths + oldname + '0' + imager + filetype
combnew = paths + '0' + imager + filetype
if x == 15:
combpy = pyg + paths + '0' + imager + filetype + "'" +")"+"]"
if x == 16:
combpy =self + paths_plus + '_down' '=' + '[' + pyg + paths + '0' + imager + filetype + "'" +")"+","
if x == 23:
combpy = pyg + paths + '0' + imager + filetype + "'" +")"+"]"
if x == 24:
combpy = self + paths_plus + '_rt' '=' + '[' + pyg + paths + '0' + imager + filetype + "'" +")"+"]"
x+=1
#print(combpy)
print(comb)
print(combnew)
#os.rename(comb,combnew)
f.write(combpy)
f.write('\n')
helpful_notes='4x8: 0-8, 9 - 17, 18-26, 27-36'
textpath = 'text'
t = [0, 1, 2, 3]
tt = [0,1,2,3,4]
up = 'up'
lft = 'lft'
down = 'down'
rt = 'rt'
stand = 'standing'
direction = [up, lft, down, rt, stand]
#IF then blits, Next step is to combine multiple files to write out things like hood, shirt, pants ect.
for num in tt:
if_status = q + self + checks[num] + colon
    if checks[num] == isup:
        blits = '\n' + '\t' + display0 + self + paths_plus + '_' + direction[0] + '[self.walkCount//3]' + ', (' + self + 'x - ' + self + 'CameraX, ' + self + 'y - ' + self + 'CameraY))' + '\n'
    elif checks[num] == isleft:
        blits = '\n' + '\t' + display0 + self + paths_plus + '_' + direction[1] + '[self.walkCount//3]' + ', (' + self + 'x - ' + self + 'CameraX, ' + self + 'y - ' + self + 'CameraY))' + '\n'
    elif checks[num] == isdown:
        blits = '\n' + '\t' + display0 + self + paths_plus + '_' + direction[2] + '[self.walkCount//3]' + ', (' + self + 'x - ' + self + 'CameraX, ' + self + 'y - ' + self + 'CameraY))' + '\n'
    elif checks[num] == isright:
        blits = '\n' + '\t' + display0 + self + paths_plus + '_' + direction[3] + '[self.walkCount//3]' + ', (' + self + 'x - ' + self + 'CameraX, ' + self + 'y - ' + self + 'CameraY))' + '\n'
    elif checks[num] == isstand:
        blits = '\n' + '\t' + display0 + self + paths_plus + '_' + direction[4] + '[self.walkCount//3]' + ', (' + self + 'x - ' + self + 'CameraX, ' + self + 'y - ' + self + 'CameraY))' + '\n'
# print(if_status,blits)
f.write(if_status)
f.write(blits)
#BLIT
# up lft down rt
#win.blit(self.doing_wearing_lft[self.Walkcount //3], (self.x - self.CameraX, self.y - self.CameraY))
for num in t:
blits = display0 + self + paths_plus + '_' + direction[num] + ', (' + self + 'x - ' + self + 'CameraX, ' +self + 'y - ' + self+ 'CameraY))'
#print(blits)
f.close()
| 30.738462
| 184
| 0.50951
|
0151193feb3406ccd7e6d9336e9f572f0ddef308
| 1,487
|
py
|
Python
|
src/archive/data_set.py
|
physicsgoddess1972/Precipitable-Water-Model
|
5280067fac90d1ed6dfe2a2ad589f444d81f95b3
|
[
"MIT"
] | 2
|
2019-04-07T21:26:18.000Z
|
2022-03-09T22:41:56.000Z
|
src/archive/data_set.py
|
physicsgoddess1972/Precipitable-Water-Model
|
5280067fac90d1ed6dfe2a2ad589f444d81f95b3
|
[
"MIT"
] | 25
|
2019-06-27T19:27:08.000Z
|
2022-02-20T17:34:13.000Z
|
src/archive/data_set.py
|
physicsgoddess1972/Precipitable-Water-Model
|
5280067fac90d1ed6dfe2a2ad589f444d81f95b3
|
[
"MIT"
] | 8
|
2019-04-07T20:09:46.000Z
|
2021-06-29T21:46:58.000Z
|
from numpy import *
fname1 = "../../data/modtran/temp_offset_wsv_half/modtran-5.csv"
fname2 = "../../data/modtran/temp_offset_wsv_half/modtran0.csv"
fname3 = "../../data/modtran/temp_offset_wsv_half/modtran+5.csv"
sname = "../../data/modtran/temp_offset_wsv_half/modtran.csv"
# fname1 = "../../data/modtran/ir_band/flir_low.csv"
# fname2 = "../../data/modtran/ir_band/flir_high.csv"
# fname3 = "../../data/modtran/ir_band/ames_low.csv"
# fname4 = "../../data/modtran/ir_band/ames_high.csv"
# sname = "../../data/modtran/ir_band/modtran.csv"
wl = loadtxt(fname1, delimiter=",", unpack=True, skiprows=3)[1]
rd1 = loadtxt(fname1, delimiter=",", unpack=True, skiprows=3)[3]
rd2 = loadtxt(fname2, delimiter=",", unpack=True, skiprows=3)[3]
rd3 = loadtxt(fname3, delimiter=",", unpack=True, skiprows=3)[3]
# rd4 = loadtxt(fname4, delimiter=",", unpack=True, skiprows=3)[3]
# rd5 = loadtxt(fname5, delimiter=",", unpack=True, skiprows=3)[3]
f = open(sname, "w")
# f.write("wavelength,Radiance (WVS 0.25),Radiance (WVS 0.5),Radiance (WVS 1.0),Radiance (WVS 1.5),Radiance (WVS 2.0)\n")
f.write("wavelength, Radiance (T-5), Radiance (T), Radiance (T+5)\n")
for i in range(0, len(wl)):
f.write(str(wl[i]))
f.write(",")
# f.write(str(rd5[i]))
# f.write(",")
f.write(str(rd1[i]))
f.write(",")
f.write(str(rd2[i]))
f.write(",")
f.write(str(rd3[i]))
# f.write(",")
# f.write(str(rd4[i]))
f.write("\n")
f.close()
| 39.131579
| 122
| 0.62273
|
47c03814378ef1adbc73d4f74bc2da6c25fc4c48
| 4,892
|
py
|
Python
|
python3-virtualenv/lib/python3.8/site-packages/setuptools/command/setopt.py
|
bbalkaransingh23888/OrientationHack
|
7eae6cce1226112c000ea8a175f6dc5a82ee0ac2
|
[
"MIT"
] | null | null | null |
python3-virtualenv/lib/python3.8/site-packages/setuptools/command/setopt.py
|
bbalkaransingh23888/OrientationHack
|
7eae6cce1226112c000ea8a175f6dc5a82ee0ac2
|
[
"MIT"
] | null | null | null |
python3-virtualenv/lib/python3.8/site-packages/setuptools/command/setopt.py
|
bbalkaransingh23888/OrientationHack
|
7eae6cce1226112c000ea8a175f6dc5a82ee0ac2
|
[
"MIT"
] | null | null | null |
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
import configparser
from setuptools import Command
__all__ = ["config_file", "edit_config", "option_base", "setopt"]
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == "local":
return "setup.cfg"
if kind == "global":
return os.path.join(os.path.dirname(distutils.__file__), "distutils.cfg")
if kind == "user":
dot = os.name == "posix" and "." or ""
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError("config_file() type must be 'local', 'global', or 'user'", kind)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
log.debug("Reading configuration from %s", filename)
opts = configparser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug("Deleting %s.%s from %s", section, option, filename)
opts.remove_option(section, option)
if not opts.options(section):
log.info(
"Deleting empty [%s] section from %s", section, filename
)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s", section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, "w") as f:
opts.write(f)
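# Illustrative example of the `settings` structure edit_config expects
# (the section and option names below are made up for demonstration):
#   edit_config("setup.cfg", {
#       "metadata": {"license": "MIT"},   # set metadata.license = MIT
#       "egg_info": {"tag_build": None},  # delete a single option
#       "obsolete_section": None,         # delete an entire section
#   }, dry_run=True)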
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
("global-config", "g", "save options to the site-wide distutils.cfg file"),
("user-config", "u", "save options to the current user's pydistutils.cfg file"),
("filename=", "f", "configuration file to use (default=setup.cfg)"),
]
boolean_options = [
"global-config",
"user-config",
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file("global"))
if self.user_config:
filenames.append(config_file("user"))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file("local"))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option", filenames
)
(self.filename,) = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
("command=", "c", "command to set an option for"),
("option=", "o", "option to set"),
("set-value=", "s", "value of the option"),
("remove", "r", "remove (unset) the value"),
] + option_base.user_options
boolean_options = option_base.boolean_options + ["remove"]
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename,
{self.command: {self.option.replace("-", "_"): self.set_value}},
self.dry_run,
)
| 35.449275
| 88
| 0.596688
|
79b859a848e8ff3f32eb90f9b2af4ad4fdf36f2d
| 2,634
|
py
|
Python
|
raiden_contracts/tests/unit/test_settle_timeout_invariant.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/unit/test_settle_timeout_invariant.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/unit/test_settle_timeout_invariant.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
from typing import Callable
import pytest
from eth_tester.exceptions import TransactionFailed
from web3 import Web3
from web3.contract import Contract
from raiden_contracts.constants import TEST_SETTLE_TIMEOUT_MAX, TEST_SETTLE_TIMEOUT_MIN
from raiden_contracts.tests.utils import LOCKSROOT_OF_NO_LOCKS, call_and_transact, fake_bytes
from raiden_contracts.tests.utils.blockchain import mine_blocks
def test_settle_timeout_inrange(
token_network: Contract,
get_accounts: Callable,
web3: Web3,
create_close_signature_for_no_balance_proof: Callable,
) -> None:
"""The TokenNetwork constructor must enforce that settle timeout is in
the valid range.
Also asserts that the constants.py and the netting channel contract values
are synched.
"""
(A, B) = get_accounts(2)
small_settle_timeout = TEST_SETTLE_TIMEOUT_MIN - 1
large_settle_timeout = TEST_SETTLE_TIMEOUT_MAX + 1
with pytest.raises(TransactionFailed):
token_network.functions.openChannel(A, B, small_settle_timeout).call()
with pytest.raises(TransactionFailed):
token_network.functions.openChannel(A, B, large_settle_timeout).call()
call_and_transact(token_network.functions.openChannel(A, B, TEST_SETTLE_TIMEOUT_MIN))
channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()
(settle_block_number, _) = token_network.functions.getChannelInfo(
channel_identifier, A, B
).call()
assert settle_block_number == TEST_SETTLE_TIMEOUT_MIN
closing_sig = create_close_signature_for_no_balance_proof(A, channel_identifier)
call_and_transact(
token_network.functions.closeChannel(
channel_identifier=channel_identifier,
non_closing_participant=B,
closing_participant=A,
balance_hash=fake_bytes(32),
nonce=0,
additional_hash=fake_bytes(32),
non_closing_signature=fake_bytes(65),
closing_signature=closing_sig,
),
{"from": A},
)
mine_blocks(web3, TEST_SETTLE_TIMEOUT_MIN + 1)
call_and_transact(
token_network.functions.settleChannel(
channel_identifier, A, 0, 0, LOCKSROOT_OF_NO_LOCKS, B, 0, 0, LOCKSROOT_OF_NO_LOCKS
),
{"from": A},
)
call_and_transact(token_network.functions.openChannel(A, B, TEST_SETTLE_TIMEOUT_MAX))
channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()
(settle_block_number, _) = token_network.functions.getChannelInfo(
channel_identifier, A, B
).call()
assert settle_block_number == TEST_SETTLE_TIMEOUT_MAX
| 36.583333
| 94
| 0.738421
|
9568b329da27278776ec83dd156692a1f6bf585d
| 17,521
|
py
|
Python
|
sdk/python/lib/pulumi/output.py
|
marcusturewicz/pulumi
|
fbf391fd4bf76142ab10160a8679fb0986435bdf
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/lib/pulumi/output.py
|
marcusturewicz/pulumi
|
fbf391fd4bf76142ab10160a8679fb0986435bdf
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/lib/pulumi/output.py
|
marcusturewicz/pulumi
|
fbf391fd4bf76142ab10160a8679fb0986435bdf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from functools import reduce
from inspect import isawaitable
from typing import (
TypeVar,
Generic,
Set,
Callable,
Awaitable,
Union,
cast,
Mapping,
Any,
List,
Optional,
TYPE_CHECKING
)
from . import runtime
from .runtime import rpc
if TYPE_CHECKING:
from .resource import Resource
T = TypeVar('T')
U = TypeVar('U')
Input = Union[T, Awaitable[T], 'Output[T]']
Inputs = Mapping[str, Input[Any]]
class Output(Generic[T]):
"""
Output helps encode the relationship between Resources in a Pulumi application. Specifically an
Output holds onto a piece of Data and the Resource it was generated from. An Output value can
then be provided when constructing new Resources, allowing that new Resource to know both the
value as well as the Resource the value came from. This allows for a precise 'Resource
dependency graph' to be created, which properly tracks the relationship between resources.
"""
_is_known: Awaitable[bool]
"""
Whether or not this 'Output' should actually perform .apply calls. During a preview,
an Output value may not be known (because it would have to actually be computed by doing an
'update'). In that case, we don't want to perform any .apply calls as the callbacks
may not expect an undefined value. So, instead, we just transition to another Output
value that itself knows it should not perform .apply calls.
"""
_is_secret: Awaitable[bool]
"""
Whether or not this 'Output' should be treated as containing secret data. Secret outputs are tagged when
flowing across the RPC interface to the resource monitor, such that when they are persisted to disk in
our state file, they are encrypted instead of being in plaintext.
"""
_future: Awaitable[T]
"""
Future that actually produces the concrete value of this output.
"""
_resources: Awaitable[Set['Resource']]
"""
The list of resources that this output value depends on.
"""
def __init__(self, resources: Union[Awaitable[Set['Resource']], Set['Resource']],
future: Awaitable[T], is_known: Awaitable[bool],
is_secret: Optional[Awaitable[bool]] = None) -> None:
is_known = asyncio.ensure_future(is_known)
future = asyncio.ensure_future(future)
async def is_value_known() -> bool:
return await is_known and not contains_unknowns(await future)
if isinstance(resources, set):
self._resources = asyncio.Future()
self._resources.set_result(resources)
else:
self._resources = asyncio.ensure_future(resources)
self._future = future
self._is_known = asyncio.ensure_future(is_value_known())
if is_secret is not None:
self._is_secret = asyncio.ensure_future(is_secret)
else:
self._is_secret = asyncio.Future()
self._is_secret.set_result(False)
# Private implementation details - do not document.
def resources(self) -> Awaitable[Set['Resource']]:
return self._resources
def future(self, with_unknowns: Optional[bool] = None) -> Awaitable[Optional[T]]:
        # If the caller did not explicitly ask to see unknown values and the value of this output contains unknowns,
        # return None. This preserves compatibility with earlier versions of the Pulumi SDK.
async def get_value() -> Optional[T]:
val = await self._future
return None if not with_unknowns and contains_unknowns(val) else val
return asyncio.ensure_future(get_value())
def is_known(self) -> Awaitable[bool]:
return self._is_known
def is_secret(self) -> Awaitable[bool]:
return self._is_secret
# End private implementation details.
def apply(self, func: Callable[[T], Input[U]], run_with_unknowns: Optional[bool] = None) -> 'Output[U]':
"""
Transforms the data of the output with the provided func. The result remains a
Output so that dependent resources can be properly tracked.
'func' is not allowed to make resources.
'func' can return other Outputs. This can be handy if you have a Output<SomeVal>
and you want to get a transitive dependency of it.
This function will be called during execution of a 'pulumi up' request. It may not run
        during 'pulumi preview' (as the values of resources may not be known then).
:param Callable[[T],Input[U]] func: A function that will, given this Output's value, transform the value to
an Input of some kind, where an Input is either a prompt value, a Future, or another Output of the given
type.
:return: A transformed Output obtained from running the transformation function on this Output's value.
:rtype: Output[U]
"""
result_resources: asyncio.Future[Set['Resource']] = asyncio.Future()
result_is_known: asyncio.Future[bool] = asyncio.Future()
result_is_secret: asyncio.Future[bool] = asyncio.Future()
# The "run" coroutine actually runs the apply.
async def run() -> U:
try:
# Await this output's details.
resources = await self._resources
is_known = await self._is_known
is_secret = await self._is_secret
value = await self._future
if runtime.is_dry_run():
# During previews only perform the apply if the engine was able to give us an actual value for this
# Output or if the caller is able to tolerate unknown values.
apply_during_preview = is_known or run_with_unknowns
if not apply_during_preview:
# We didn't actually run the function, our new Output is definitely
# **not** known.
result_resources.set_result(resources)
result_is_known.set_result(False)
result_is_secret.set_result(is_secret)
return cast(U, None)
# If we are running with unknown values and the value is explicitly unknown but does not actually
# contain any unknown values, collapse its value to the unknown value. This ensures that callbacks
# that expect to see unknowns during preview in outputs that are not known will always do so.
if not is_known and run_with_unknowns and not contains_unknowns(value):
value = cast(T, UNKNOWN)
transformed: Input[U] = func(value)
# Transformed is an Input, meaning there are three cases:
# 1. transformed is an Output[U]
if isinstance(transformed, Output):
transformed_as_output = cast(Output[U], transformed)
# Forward along the inner output's _resources, _is_known and _is_secret values.
transformed_resources = await transformed_as_output._resources
result_resources.set_result(resources | transformed_resources)
result_is_known.set_result(await transformed_as_output._is_known)
result_is_secret.set_result(await transformed_as_output._is_secret or is_secret)
return await transformed.future(with_unknowns=True)
# 2. transformed is an Awaitable[U]
if isawaitable(transformed):
# Since transformed is not an Output, it is known.
result_resources.set_result(resources)
result_is_known.set_result(True)
result_is_secret.set_result(is_secret)
return await cast(Awaitable[U], transformed)
# 3. transformed is U. It is trivially known.
result_resources.set_result(resources)
result_is_known.set_result(True)
result_is_secret.set_result(is_secret)
return cast(U, transformed)
finally:
# Always resolve the future if it hasn't been done already.
if not result_is_known.done():
# Try and set the result. This might fail if we're shutting down,
# so swallow that error if that occurs.
try:
result_resources.set_result(resources)
result_is_known.set_result(False)
result_is_secret.set_result(False)
except RuntimeError:
pass
run_fut = asyncio.ensure_future(run())
return Output(result_resources, run_fut, result_is_known, result_is_secret)
def __getattr__(self, item: str) -> 'Output[Any]': # type: ignore
"""
Syntax sugar for retrieving attributes off of outputs.
:param str item: An attribute name.
:return: An Output of this Output's underlying value's property with the given name.
:rtype: Output[Any]
"""
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else getattr(v, item), True)
def __getitem__(self, key: Any) -> 'Output[Any]':
"""
Syntax sugar for looking up attributes dynamically off of outputs.
:param Any key: Key for the attribute dictionary.
:return: An Output of this Output's underlying value, keyed with the given key as if it were a dictionary.
:rtype: Output[Any]
"""
return self.apply(lambda v: UNKNOWN if isinstance(v, Unknown) else cast(Any, v)[key], True)
@staticmethod
def from_input(val: Input[T]) -> 'Output[T]':
"""
Takes an Input value and produces an Output value from it, deeply unwrapping nested Input values through nested
lists and dicts. Nested objects of other types (including Resources) are not deeply unwrapped.
:param Input[T] val: An Input to be converted to an Output.
:return: A deeply-unwrapped Output that is guaranteed to not contain any Input values.
:rtype: Output[T]
"""
# Is it an output already? Recurse into the value contained within it.
if isinstance(val, Output):
return val.apply(Output.from_input, True)
# Is a dict or list? Recurse into the values within them.
if isinstance(val, dict):
# Since Output.all works on lists early, serialize this dictionary into a list of lists first.
# Once we have a output of the list of properties, we can use an apply to re-hydrate it back into a dict.
dict_items = [[k, Output.from_input(v)] for k, v in val.items()]
            # type checker doesn't like returning a Dict in the apply callback
fn = cast(Callable[[List[Any]], T], lambda props: {k: v for k, v in props}) # pylint: disable=unnecessary-comprehension
return Output.all(*dict_items).apply(fn, True)
if isinstance(val, list):
list_items: List[Union[Any, Awaitable[Any], Output[Any]]] = [Output.from_input(v) for v in val]
# invariant: http://mypy.readthedocs.io/en/latest/common_issues.html#variance
output: Output[T] = cast(Output[T], Output.all(*list(list_items))) # type: ignore
return output
# If it's not an output, list, or dict, it must be known and not secret
is_known_fut: asyncio.Future[bool] = asyncio.Future()
is_secret_fut: asyncio.Future[bool] = asyncio.Future()
is_known_fut.set_result(True)
is_secret_fut.set_result(False)
# Is it awaitable? If so, schedule it for execution and use the resulting future
# as the value future for a new output.
if isawaitable(val):
val_fut = cast(asyncio.Future, val)
promise_output = Output(set(), asyncio.ensure_future(val_fut), is_known_fut, is_secret_fut)
return promise_output.apply(Output.from_input, True)
# Is it a prompt value? Set up a new resolved future and use that as the value future.
value_fut: asyncio.Future[Any] = asyncio.Future()
value_fut.set_result(val)
return Output(set(), value_fut, is_known_fut, is_secret_fut)
@staticmethod
def secret(val: Input[T]) -> 'Output[T]':
"""
Takes an Input value and produces an Output value from it, deeply unwrapping nested Input values as necessary
given the type. It also marks the returned Output as a secret, so its contents will be persisted in an encrypted
form in state files.
:param Input[T] val: An Input to be converted to an Secret Output.
:return: A deeply-unwrapped Output that is guaranteed to not contain any Input values and is marked as a Secret.
:rtype: Output[T]
"""
o = Output.from_input(val)
is_secret: asyncio.Future[bool] = asyncio.Future()
is_secret.set_result(True)
return Output(o._resources, o._future, o._is_known, is_secret)
@staticmethod
def all(*args: Input[T]) -> 'Output[List[T]]':
"""
Produces an Output of Lists from a List of Inputs.
This function can be used to combine multiple, separate Inputs into a single
Output which can then be used as the target of `apply`. Resource dependencies
are preserved in the returned Output.
:param Input[T] args: A list of Inputs to convert.
:return: An output of lists, converted from an Input to prompt values.
:rtype: Output[List[T]]
"""
# Three asynchronous helper functions to assist in the implementation:
# is_known, which returns True if all of the input's values are known,
# and false if any of them are not known,
async def is_known(outputs):
is_known_futures = list(map(lambda o: o._is_known, outputs))
each_is_known = await asyncio.gather(*is_known_futures)
return all(each_is_known)
# is_secret, which returns True if any of the input values are secret, and
# false if none of them are secret.
async def is_secret(outputs):
is_secret_futures = list(map(lambda o: o._is_secret, outputs))
each_is_secret = await asyncio.gather(*is_secret_futures)
return any(each_is_secret)
async def get_resources(outputs):
resources_futures = list(map(lambda o: o._resources, outputs))
resources_agg = await asyncio.gather(*resources_futures)
# Merge the list of resource dependencies across all inputs.
return reduce(lambda acc, r: acc.union(r), resources_agg, set())
# gather_futures, which aggregates the list of futures in each input to a future of a list.
async def gather_futures(outputs):
value_futures = list(map(lambda o: asyncio.ensure_future(o.future(with_unknowns=True)), outputs))
return await asyncio.gather(*value_futures)
from_input = cast(Callable[[Union[T, Awaitable[T], Output[T]]], Output[T]], Output.from_input)
# First, map all inputs to outputs using `from_input`.
all_outputs = list(map(from_input, args))
# Aggregate the list of futures into a future of lists.
value_futures = asyncio.ensure_future(gather_futures(all_outputs))
# Aggregate whether or not this output is known.
resources_futures = asyncio.ensure_future(get_resources(all_outputs))
known_futures = asyncio.ensure_future(is_known(all_outputs))
secret_futures = asyncio.ensure_future(is_secret(all_outputs))
return Output(resources_futures, value_futures, known_futures, secret_futures)
@staticmethod
def concat(*args: Input[str]) -> 'Output[str]':
"""
Concatenates a collection of Input[str] into a single Output[str].
This function takes a sequence of Input[str], stringifies each, and concatenates all values
into one final string. This can be used like so:
url = Output.concat("http://", server.hostname, ":", loadBalancer.port)
:param Input[str] args: A list of string Inputs to concatenate.
:return: A concatenated output string.
:rtype: Output[str]
"""
transformed_items: List[Input[Any]] = [Output.from_input(v) for v in args]
# invariant http://mypy.readthedocs.io/en/latest/common_issues.html#variance
return Output.all(*transformed_items).apply("".join) # type: ignore
class Unknown:
"""
Unknown represents a value that is unknown.
"""
def __init__(self):
pass
UNKNOWN = Unknown()
"""
UNKNOWN is the singleton unknown value.
"""
def contains_unknowns(val: Any) -> bool:
return rpc.contains_unknowns(val)
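# Illustrative sketch of how these combinators compose (the values below are
# hypothetical and not part of this module):
#   host = Output.from_input("example.com")
#   port = Output.from_input(8080)
#   url = Output.all(host, port).apply(lambda args: f"{args[0]}:{args[1]}")
#   banner = Output.concat("listening on ", url)
# `all` merges the resource dependencies of its inputs, `apply` defers the
# callback until the values are known (and may skip it during previews), and
# `concat` stringifies and joins its inputs into a single Output[str].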
| 45.157216
| 131
| 0.649335
|
cfcea27e4074c3343fc2cbd61cd878b415c3865d
| 8,234
|
py
|
Python
|
api/vimeoapi.py
|
atsuchiy11/api-fastapi-video
|
f3f7e35d279cf5b64ee22055296baf772689b8e7
|
[
"MIT"
] | null | null | null |
api/vimeoapi.py
|
atsuchiy11/api-fastapi-video
|
f3f7e35d279cf5b64ee22055296baf772689b8e7
|
[
"MIT"
] | null | null | null |
api/vimeoapi.py
|
atsuchiy11/api-fastapi-video
|
f3f7e35d279cf5b64ee22055296baf772689b8e7
|
[
"MIT"
] | null | null | null |
import vimeo
import requests
import urllib.parse
from requests import HTTPError
import os
# from os.path import join, dirname
from dotenv import load_dotenv
from pprint import pprint
# env_path = join(dirname(__file__), "../.env")
load_dotenv(override=True)
def create_vimeo_client():
"""create Vimeo client"""
return vimeo.VimeoClient(
token=os.environ.get("VIMEO_TOKEN_PROD"),
key=os.environ.get("VIMEO_KEY_PROD"),
secret=os.environ.get("VIMEO_SECRET_PROD"),
)
def about_me(client):
res = client.get("/me")
pprint(res.json())
# OK
def get_video_from_vimeo(client, video_id):
"""Get specified video from VimeoAPI"""
params = {
"fields": "uri,name,duration,stats,privacy,embed.html,pictures.sizes",
}
query_params = urllib.parse.urlencode(params)
try:
res = client.get(f"/videos/{video_id}?{query_params}")
res.raise_for_status()
res_json = res.json()
# sort
data = {}
data["uri"] = res_json.get("uri", None)
data["name"] = res_json.get("name", None)
data["duration"] = res_json.get("duration", 0)
data["stats"] = res_json.get("stats").get("plays", 0)
# data["privacy"] = res_json.get("privacy").get("view", None)
data["html"] = res_json.get("embed").get("html")
pictures = res_json.get("pictures").get("sizes")
*_, max_size = pictures
data["thumbnail"] = max_size.get("link")
return data
except HTTPError as err:
raise err
except requests.exceptions.RequestException as err:
raise err
# OK
def get_upload_url(client, file):
"""Get upload URL for vimeo"""
preset_id = 120971641
try:
# get upload URL
res = client.post(
"/me/videos",
data={
"name": file.name,
"description": file.description,
"locale": "ja",
"privacy": {"download": False, "view": "disable"},
"upload": {"approach": "tus", "size": str(file.size)},
},
)
res_parsed = res.json()
pprint(res_parsed)
res_filtered = {}
res_filtered["uri"] = res_parsed.get("uri", "")
res_filtered["name"] = res_parsed.get("name", "")
res_filtered["type"] = res_parsed.get("type", "")
res_filtered["description"] = res_parsed.get("description", "")
res_filtered["link"] = res_parsed.get("link", "")
if (upload := res_parsed.get("upload", None)) is None:
res_filtered["upload_link"] = ""
else:
res_filtered["upload_link"] = upload.get("upload_link", "")
# allow domain
client.put(res_parsed["uri"] + "/privacy/domains/localhost:3000")
client.put(res_parsed["uri"] + "/privacy/domains/prime-studio.vercel.app")
client.patch(res_parsed["uri"], data={"privacy": {"embed": "whitelist"}})
# append embed presets
client.put(res_parsed["uri"] + f"/presets/{preset_id}")
return res_filtered
except Exception as err:
print(err)
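# Note (added for clarity): the "upload_link" returned above is intended to be
# consumed by a tus (resumable upload) client, which sends the video bytes to
# that URL; the client-side upload step itself is outside the scope of this module.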
# OK
def get_upload_status(client, video_id):
"""Get status for upload video to vimeo"""
uri = f"/videos/{video_id}"
res = client.get(uri + "?fields=transcode.status").json()
if res["transcode"]["status"] == "complete":
print("Your video finished transcoding.")
elif res["transcode"]["status"] == "in_progress":
print("Your video is still transcoding.")
else:
print("Your video encountered an error during transcoding.")
return {"transcode_status": res["transcode"]["status"]}
###
def get_total(client):
params = {"fields": "total"}
query_params = urllib.parse.urlencode(params)
try:
res = client.get(f"/me/videos?{query_params}")
        # res.raise_for_status()
res_json = res.json()
return res_json
except HTTPError as err:
raise err
except requests.exceptions.RequestException as err:
raise err
def get_videos_from_vimeo_async(client, chunk, page):
params = {
"page": page,
"per_page": chunk,
"fields": "uri,name,duration,stats,privacy,embed.html,pictures.sizes",
}
query_params = urllib.parse.urlencode(params)
print(f"getting videos: {page}")
try:
res = client.get(f"/me/videos?{query_params}")
        # res.raise_for_status()
res_json = res.json()
print(f"got videos: {page}")
api_response = {}
api_response["total"] = res_json.get("total", None)
api_response["data"] = []
for d in res_json.get("data", []):
_d = {}
_d["uri"] = d.get("uri", None)
_d["name"] = d.get("name", None)
_d["duration"] = d.get("duration", None)
_d["stats"] = d.get("stats", {})
_d["privacy"] = {}
_d["privacy"]["view"] = d.get("privacy", {}).get("view", None)
_d["html"] = d.get("embed", {}).get("html", None)
_d["thumbnail"] = {}
if (pictures := d.get("pictures", {}).get("sizes", None)) is not None:
*_, max_size = pictures
_d["thumbnail"] = max_size
api_response["data"].append(_d)
return api_response
except HTTPError as err:
raise err
except requests.exceptions.RequestException as err:
raise err
def get_videos_from_vimeo(client, all, page):
"""Get videos from VimeoAPI"""
per_page = 100
videos = {"total": 0, "data": []}
def getter(page, per_page, videos):
params = {
"page": page,
"per_page": per_page,
"fields": "uri,name,duration,stats,privacy,embed.html,pictures.sizes",
}
query_params = urllib.parse.urlencode(params)
try:
res = client.get(f"/me/videos?{query_params}")
res.raise_for_status()
res_json = res.json()
videos["data"] += res_json["data"]
videos["total"] = res_json["total"]
            # Do not recurse.
if not all:
return videos
            # Recurse while more pages remain.
if int(res_json.get("total", 0)) - (per_page * int(page)) > 0:
page = int(page) + 1
query_params = urllib.parse.urlencode(params)
getter(page, per_page, videos)
return videos
except HTTPError as err:
raise err
except requests.exceptions.RequestException as err:
raise err
try:
vimeo_json = getter(page, per_page, videos)
except HTTPError as err:
raise err
except requests.exceptions.RequestException as err:
raise err
api_response = {}
api_response["total"] = vimeo_json.get("total", None)
api_response["data"] = []
for d in vimeo_json.get("data", []):
_d = {}
_d["uri"] = d.get("uri", None)
_d["name"] = d.get("name", None)
_d["duration"] = d.get("duration", None)
_d["stats"] = d.get("stats", {})
_d["privacy"] = {}
_d["privacy"]["view"] = d.get("privacy", {}).get("view", None)
_d["html"] = d.get("embed", {}).get("html", None)
_d["thumbnail"] = {}
if (pictures := d.get("pictures", {}).get("sizes", None)) is not None:
*_, max_size = pictures
_d["thumbnail"] = max_size
api_response["data"].append(_d)
return api_response
def upload_thumbnail(client, video_id, tmp_path):
"""Upload thumbnail to Vimeo"""
try:
res = client.upload_picture(f"/videos/{video_id}", tmp_path, activate=True)
return res
except HTTPError as err:
print("HTTPError", err)
raise err
except requests.exceptions.RequestException as err:
print(err)
raise err
except BaseException as err:
print("BaseException", err)
raise err
except Exception as err:
print("Exception", err)
        raise err
if __name__ == "__main__":
"""Run locally"""
client = create_vimeo_client()
try:
# get_videos_from_vimeo(client)
        upload_thumbnail(client)  # NOTE: also requires a video_id and a local thumbnail path to run
except BaseException as err:
raise err
| 30.609665
| 83
| 0.568861
|
5bc960776f57727dd057c02268fa04baf971a275
| 99
|
py
|
Python
|
L15:Adding and Blending Images/add.py
|
animeshsrivastava24/OPEN_CV
|
cee2a4ffbc738a282e280ad531d7beb1ba476ad6
|
[
"MIT"
] | 9
|
2017-06-08T16:53:43.000Z
|
2021-05-18T11:15:03.000Z
|
L15:Adding and Blending Images/add.py
|
animeshsrivastava24/OPEN_CV
|
cee2a4ffbc738a282e280ad531d7beb1ba476ad6
|
[
"MIT"
] | null | null | null |
L15:Adding and Blending Images/add.py
|
animeshsrivastava24/OPEN_CV
|
cee2a4ffbc738a282e280ad531d7beb1ba476ad6
|
[
"MIT"
] | 12
|
2018-03-15T11:15:40.000Z
|
2021-01-28T20:43:58.000Z
|
import numpy as np
import cv2
x = np.uint8([250])
y = np.uint8([10])
print(cv2.add(x, y))  # OpenCV addition saturates: 250 + 10 -> [[255]]
print(x + y)          # NumPy uint8 addition wraps around: 250 + 10 -> [4]
| 12.375
| 19
| 0.656566
|
a347f437390b8185fef80b7044037b1448ad33f3
| 5,173
|
py
|
Python
|
configs/ruby/AMD_Base_Constructor.py
|
He-Liu-ooo/Computer-Architecture-THUEE-2022-spring-
|
9d36aaacbc7eea357608524113bec97bae2ea229
|
[
"BSD-3-Clause"
] | 16
|
2020-09-24T00:17:36.000Z
|
2021-08-12T06:11:52.000Z
|
configs/ruby/AMD_Base_Constructor.py
|
He-Liu-ooo/Computer-Architecture-THUEE-2022-spring-
|
9d36aaacbc7eea357608524113bec97bae2ea229
|
[
"BSD-3-Clause"
] | 5
|
2021-01-27T23:09:06.000Z
|
2022-01-07T03:19:39.000Z
|
configs/ruby/AMD_Base_Constructor.py
|
He-Liu-ooo/Computer-Architecture-THUEE-2022-spring-
|
9d36aaacbc7eea357608524113bec97bae2ea229
|
[
"BSD-3-Clause"
] | 15
|
2020-11-18T00:15:28.000Z
|
2021-12-12T03:18:34.000Z
|
# Copyright (c) 2015-2017 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath, convert
from .CntrlBase import *
addToPath('../')
from topologies.Cluster import Cluster
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
latency = 1
resourceStalls = False
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = TreePLRURP()
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
latency = 10
resourceStalls = False
def create(self, size, assoc, options):
self.size = MemorySize(size)
self.assoc = assoc
self.replacement_policy = TreePLRURP()
class CPCntrl(AMD_Base_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.cntrl_id = self.cntrlCount()
self.L1Icache = L1Cache()
self.L1Icache.create(options.l1i_size, options.l1i_assoc, options)
self.L1D0cache = L1Cache()
self.L1D0cache.create(options.l1d_size, options.l1d_assoc, options)
self.L1D1cache = L1Cache()
self.L1D1cache.create(options.l1d_size, options.l1d_assoc, options)
self.L2cache = L2Cache()
self.L2cache.create(options.l2_size, options.l2_assoc, options)
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def define_options(parser):
parser.add_option("--cpu-to-dir-latency", type="int", default=15)
def construct(options, system, ruby_system):
    # Panic unless one of the VIPER-based protocols has been built.
    if (buildEnv['PROTOCOL'] != 'GPU_VIPER' and
        buildEnv['PROTOCOL'] != 'GPU_VIPER_Region' and
        buildEnv['PROTOCOL'] != 'GPU_VIPER_Baseline'):
panic("This script requires VIPER based protocols \
to be built.")
cpu_sequencers = []
cpuCluster = None
cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW=8) # 16 GB/s
for i in range((options.num_cpus + 1) // 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
# Connect the CP controllers to the ruby network
cp_cntrl.requestFromCore = ruby_system.network.slave
cp_cntrl.responseFromCore = ruby_system.network.slave
cp_cntrl.unblockFromCore = ruby_system.network.slave
cp_cntrl.probeToCore = ruby_system.network.master
cp_cntrl.responseToCore = ruby_system.network.master
exec("system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
cpuCluster.add(cp_cntrl)
return cpu_sequencers, cpuCluster
| 38.604478
| 78
| 0.714672
|
314ceeb981c2767361e99f528ba33b693d1a8faf
| 3,506
|
py
|
Python
|
GMM/segment.py
|
zzhmark/insitu-GMM-on-umap
|
01f003475a9c82f98aee5aab6af874732d3049bb
|
[
"MIT"
] | null | null | null |
GMM/segment.py
|
zzhmark/insitu-GMM-on-umap
|
01f003475a9c82f98aee5aab6af874732d3049bb
|
[
"MIT"
] | null | null | null |
GMM/segment.py
|
zzhmark/insitu-GMM-on-umap
|
01f003475a9c82f98aee5aab6af874732d3049bb
|
[
"MIT"
] | null | null | null |
import numpy as np
from typing import Tuple, List
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
import pandas as pd
def gmm(data: np.ndarray, n: int, method: str = 'default') -> \
Tuple[List[int], List[int]]:
"""
:param data: list of input
:param n: number of components
:param method: 'default' or 'bayesian'
:return: (labels, means)
"""
# To avoid error, the number of components should be
# no more than the length of input data.
noc = min(len(data), n)
if method.lower() == 'bayesian':
model = BayesianGaussianMixture(n_components=noc, random_state=123)
model.fit(data)
else:
model = GaussianMixture(n_components=noc, random_state=123)
model.fit(data)
return model.predict(data), model.means_
def global_gmm(expr: np.ndarray, n: int):
"""
:param expr: a list of values representing expression
:param n: number of components
:return: (labels, means table)
Solve GMM for the heat map, iteratively find
the minimalist mean of GMM models and separate the
corresponding points.
"""
# Down sample the images and masks to reduce calculation.
label_out = np.zeros(expr.shape, dtype=np.uint8)
min_expr = np.min(expr)
expr_copy = expr - min_expr
nol = 0 # The count of labels.
model_out = pd.DataFrame(columns=[0, 1])
global_mean = np.mean(expr_copy)
while True:
labels, means = gmm(expr_copy, n)
max_pts, max_mean = labels == np.argmax(means), max(means)
# When the minimum mean reach the global mean, break the loop.
if max_mean < global_mean:
break
# Otherwise, label the points in the output mask,
# and dump them in the next run.
nol += 1
label_out[max_pts] = nol
expr_copy[max_pts] = 0
model_out = model_out.append([[nol, max_mean + min_expr]])
model_out.columns = ['label', 'mean']
model_out = model_out.set_index('label')
return label_out, model_out
def local_gmm(coord: np.ndarray, label: np.ndarray,
global_model: pd.DataFrame, n: int) -> \
Tuple[np.ndarray, np.ndarray, pd.DataFrame]:
"""
    :param coord: a list of coordinates
    :param label: label mask for each point, as produced by global_gmm
    :param global_model: parameters of global gmm, pandas data frame
:param n: maximum number of components for the bayesian algorithm
:return: (labels, means table)
Solve GMM for points' local distribution within
each grayscale level generated by the global model.
"""
label_out = np.zeros(label.shape)
model_out = pd.DataFrame(columns=['label', 'mean'])
# Iterate over different expression levels in the global model.
for i, mean in zip(global_model.index, global_model['mean']):
pts = coord[label == i] # Retrieve points with a specific label.
labels = gmm(pts, n, 'bayesian')[0]
# Adjust labels from 0..n-1 to 1..n.
# Because labels can be discontinuous.
levels = np.unique(labels)
        labels = [np.where(levels == lab)[0][0] + 1 for lab in labels]
        # Label the areas on the output mask.
        start = int(np.max(label_out))
        # Use a distinct loop variable so the `label` mask argument is not
        # overwritten and can still be indexed on the next iteration.
        for p, local_label in zip(pts, labels):
            label_out[tuple(p)] = start + local_label
        model = pd.DataFrame({'label': [*range(start + 1, start + max(labels) + 1)],
                              'mean': [mean] * max(labels)})
model_out = model_out.append(model)
model_out = model_out.set_index('label')
return label_out, model_out
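# Illustrative flow (the shapes involved are assumptions about the caller's
# data, not guarantees made by this module):
#   1. global_labels, global_model = global_gmm(expr, n)   # split by expression level
#   2. local_labels, local_model = local_gmm(coord, global_labels, global_model, n)
# global_gmm iteratively peels off the highest-mean component until it reaches
# the global mean; local_gmm then sub-clusters each level spatially with a
# Bayesian GMM of at most n components.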
| 38.527473
| 76
| 0.640901
|
d5e005aa4b41a9219bda0fdc13fd81e0f58a6d08
| 5,248
|
py
|
Python
|
assignments/assignment-4/src/lr_mnist.py
|
JakubR12/cds-visual
|
5fa90ecb39ddfee5365478d4d6d2ed4a9cddc364
|
[
"MIT"
] | 1
|
2021-05-19T10:56:59.000Z
|
2021-05-19T10:56:59.000Z
|
assignments/assignment-4/src/lr_mnist.py
|
JakubR12/cds-visual-portfolio
|
40c991f3fc9a3bc4bc2448e76d7d795cd67ab757
|
[
"MIT"
] | null | null | null |
assignments/assignment-4/src/lr_mnist.py
|
JakubR12/cds-visual-portfolio
|
40c991f3fc9a3bc4bc2448e76d7d795cd67ab757
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# importin modules
import argparse
import os
import sys
sys.path.append(os.path.join(".."))
import numpy as np
# Import utils for classification, preprocessing
import utils.classifier_utils as clf_util
import utils.ml_preprocessing as mlp
# Import sklearn metrics
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
'''
There are 7 arguments which can but do not have to be specified:
flags: -tr, --train_size, default: 0.8, description: int or float, the proportion of the data the model is to be trained on
flags: -ts, --test_size, default: 0.2, description: int or float, the proportion of the data the model is to be tested on
flags: -r, --random_state: default: None, description: int, a random seed
flags: -sm, --save_metrics: default: False, description: bool, whether to save the metrics
flags: -mf, --metrics_safe, default: logistic_regression_metrics.txt, description: str, the filename of the metrics with the .txt ending
flags: -t, --tolerance, default: 0.1, description: float, Tolerance for stopping criteria
flags: -p, --penalty, default: "none", description: str, one of "none", "l1", "l2", "elasticnet"
examples:
python lr_mnist.py -tr 0.7 -ts 0.3 -r 2 -sm -mf log_reg_filename.txt -t 0.001 -p l2
When using boolean flags (-sm), just leave it empty.
'''
def main(train_size = 0.8,
test_size = 0.2,
random_state = None,
save_metrics = True,
metrics_filename = "logistic_regression_metrics.txt",
penalty= "none",
tolerance = 0.1):
"""
This functions trains a logistic regression classifier on the mnist data set and prints the metrics.
input:
    train_size, default: 0.8, description: int or float, the proportion of the data the model is to be trained on
    test_size, default: 0.2, description: int or float, the proportion of the data the model is to be tested on
random_state: default: None, description: int, a random seed
save_metrics: default: True, description: bool, whether to save the metrics
metrics_filename, default: logistic_regression_metrics.txt, description: str, the filename of the metrics with the .txt ending
tolerance, default: 0.1, description: float, Tolerance for stopping criteria
    penalty, default: "none", description: str, one of "none", "l1", "l2", "elasticnet"
output:
classification report as a string. Can optionally be saved as a txt file
"""
print("Preparing data. This may take a while ...")
# using a function we built in the ml_preprocessing.py script to load, format, split and scale the data -> we get a clean data
X_train, X_test, y_train, y_test = mlp.fetch_visual_data(train_size = train_size, test_size = test_size, random_state = random_state)
print("The MNIST dataset has been loaded, split and scaled.")
    # calling a logistic regression with the user-supplied penalty and tolerance
    clf = LogisticRegression(penalty=penalty,
                             tol=tolerance,
solver='saga',
multi_class='multinomial').fit(X_train, y_train)
# calculating predictions for the test data, printing output metrics
y_pred = clf.predict(X_test)
cm = metrics.classification_report(y_test, y_pred)
print(cm)
# optional argument to save the data in the out file as a text file
if save_metrics == True:
filepath = os.path.join("..","models",metrics_filename)
text_file = open(filepath, "w")
text_file.write(cm)
text_file.close()
print("The metric was saved into models folder")
if __name__=="__main__":
# define comman line interface arguments
ap = argparse.ArgumentParser()
ap.add_argument("-tr", "--train_size", required = False, default = 0.8,
help = "int or float: A proportion of the data the model to be trained on")
ap.add_argument("-ts", "--test_size", required = False, default = 0.2,
help = "int or float: A proportion of the data the model to be tested on")
ap.add_argument("-r", "--random_state", required = False, default = None, type = int,
help = "int: a random seed")
ap.add_argument("-sm","--save_metrics", required = False, action = "store_true",
help = "bool: whether to save the metrics")
ap.add_argument("-mf", "--metrics_filename", required = False, default = "logistic_regression_metrics.txt",type = str,
help = "the filename of the metrics with the .txt ending")
ap.add_argument("-t", "--tolerance", required = False, default = 0.1, type = float,
help = "float: Tolerance for stopping criteria")
ap.add_argument("-p", "--penalty", required = False, default = "none", type = str,
                    help = '''penalty: one of "none", "l1", "l2", "elasticnet"''')
# parse arguments, parse the argumets and returns them as a list of arguments/variables
args = vars(ap.parse_args())
print(args)
# instead of listing all comman line interface arguments separately, we can list all of them at once with **args
main(**args)
| 43.733333
| 137
| 0.663872
|
f830b5538776854237f2bef03ef8992abf59c5d8
| 2,183
|
py
|
Python
|
src/pyinterp/__init__.py
|
apatlpo/pangeo-pyinterp
|
b5242c6869d7e601a5695b304c81992deb63367d
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyinterp/__init__.py
|
apatlpo/pangeo-pyinterp
|
b5242c6869d7e601a5695b304c81992deb63367d
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyinterp/__init__.py
|
apatlpo/pangeo-pyinterp
|
b5242c6869d7e601a5695b304c81992deb63367d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""
Common classes
--------------
"""
from typing import Tuple
import numpy as np
from . import core
from . import interface
class GridInterpolator:
"""Abstract class of interpolation of numerical grids.
Args:
*args (tuple): Constructor's arguments.
.. warning::
This class should not be instantiated directly.
"""
_CLASS = None
_INTEROLATOR = None
def __init__(self, *args):
self._class = self._CLASS + interface._core_suffix(args[-1])
self._instance = getattr(core, self._class)(*args)
@classmethod
def _n_variate_interpolator(cls, interpolator: str, **kwargs):
if interpolator == "bilinear":
return getattr(core, "Bilinear" + cls._INTEROLATOR)(**kwargs)
elif interpolator == "nearest":
return getattr(core, "Nearest" + cls._INTEROLATOR)(**kwargs)
elif interpolator == "inverse_distance_weighting":
return getattr(core, "InverseDistanceWeighting" +
cls._INTEROLATOR)(**kwargs)
raise ValueError(f"interpolator {interpolator!r} is not defined")
@property
def x(self) -> core.Axis:
"""
Gets the X-Axis handled by this instance
Returns:
pyinterp.core.Axis: X-Axis
"""
return self._instance.x
@property
def y(self) -> core.Axis:
"""
Gets the Y-Axis handled by this instance
Returns:
pyinterp.core.Axis: Y-Axis
"""
return self._instance.y
@property
def array(self) -> np.ndarray:
"""
Gets the values handled by this instance
Returns:
numpy.ndarray: values
"""
return self._instance.array
def __getstate__(self) -> Tuple:
return (self._class, self._instance.__getstate__())
def __setstate__(self, state) -> None:
self._class, state = state
self._instance = getattr(getattr(core, self._class),
"_setstate")(state)
| 26.950617
| 73
| 0.601466
|
10b6b0d236eb9914fb71847831c77aaa78f544b6
| 10,197
|
py
|
Python
|
metno_locationforecast/forecast.py
|
Amir-101/metno-locationforecast
|
6332b4dca45ee35ecb671b36859533776600dcbc
|
[
"MIT"
] | null | null | null |
metno_locationforecast/forecast.py
|
Amir-101/metno-locationforecast
|
6332b4dca45ee35ecb671b36859533776600dcbc
|
[
"MIT"
] | null | null | null |
metno_locationforecast/forecast.py
|
Amir-101/metno-locationforecast
|
6332b4dca45ee35ecb671b36859533776600dcbc
|
[
"MIT"
] | null | null | null |
"""Where the magic happens.
Classes:
Forecast: Stores forecast data, has methods for updating, saving and loading
data.
"""
import datetime as dt
import json
from pathlib import Path
from typing import Optional, Dict, Union
import requests
from .config import Config
from .data_containers import Interval, Place, Variable, Data
YR_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
HTTP_DATETIME_FORMAT = "%a, %d %b %Y %H:%M:%S %Z"
CONFIG = Config()
class Forecast:
"""Retrieves, stores and updates forecast data.
Attributes:
place (Place): Location for the forecast.
forecast_type: The type of forecast.
user_agent: the user agent to be sent with requests.
save_location (Path): Location to cache data.
base_url: Base url to make requests to.
response (requests.Response): Response object.
json_string (str): Json data as a string.
json: Json data as an object.
data (dict): Weather data.
Methods:
save: Save data to save location.
load: Load data from saved file.
update: Update forecast data.
"""
forecast_types = {"compact", "complete"}
def __init__(
self,
place: Place,
user_agent: Optional[str] = None,
forecast_type: Optional[str] = None,
save_location: Optional[str] = None,
base_url: Optional[str] = None,
):
"""Create a Forecast object.
Args:
place: Place object for the forecast
user_agent: The user-agent identifier to be sent with the request
            forecast_type: The type of forecast to retrieve
save_location: Optional; Location to cache data
base_url: Optional; URL to make requests to
"""
if not isinstance(place, Place):
msg = f"{place} is not a metno_locationforecast.Place object."
raise TypeError(msg)
self.place = place
if user_agent is None:
if CONFIG.user_agent is None:
msg = (
"User agent has not been provided. This must be passed as an argument or set "
"as a configuration."
)
raise ValueError(msg)
self.user_agent = CONFIG.user_agent
else:
self.user_agent = user_agent
if forecast_type is None:
self.forecast_type = CONFIG.forecast_type
else:
self.forecast_type = forecast_type
if save_location is None:
self.save_location = Path(CONFIG.save_location)
else:
self.save_location = Path(save_location)
if base_url is None:
self.base_url = CONFIG.base_url
else:
self.base_url = base_url
if (
self.base_url == "https://api.met.no/weatherapi/locationforecast/2.0/"
and self.forecast_type not in Forecast.forecast_types
):
msg = (
f"{self.forecast_type} is not an available forecast type. Available types are: "
f"{Forecast.forecast_types}."
)
raise ValueError(msg)
# Typing information for mypy.
self.response: requests.Response
self.json_string: str
self.json: dict # type: ignore
self.data: Data
def __repr__(self) -> str:
return (
f"Forecast({self.place}, {self.user_agent}, {self.forecast_type}, "
f"{self.save_location}, {self.base_url})"
)
def __str__(self) -> str:
if not hasattr(self, "data"):
return "No forecast data yet."
forecast_string = f"Forecast for {self.place.name}:"
for interval in self.data.intervals:
lines = str(interval).split("\n")
forecast_string += f"\n\t{lines[0]}"
for line in lines[1:]:
forecast_string += f"\n\t{line}"
return forecast_string
@property
def url(self) -> str:
"""The url for requests."""
return f"{self.base_url}{self.forecast_type}"
@property
    def url_parameters(self) -> Dict[str, Union[int, float]]:
"""Parameters to be sent with request."""
parameters: Dict[str, Union[int, float]] = {}
if self.place.coordinates["latitude"] is not None:
parameters["lat"] = self.place.coordinates["latitude"]
if self.place.coordinates["longitude"] is not None:
parameters["lon"] = self.place.coordinates["longitude"]
if self.place.coordinates["altitude"] is not None:
parameters["altitude"] = self.place.coordinates["altitude"]
return parameters
@property
def url_headers(self) -> Dict[str, str]:
"""Headers to be sent with request."""
headers = {
"User-Agent": self.user_agent,
}
if hasattr(self, "data"):
headers["If-Modified-Since"] = (
self.data.last_modified.strftime(HTTP_DATETIME_FORMAT) + "GMT"
)
return headers
@property
def file_name(self) -> str:
"""File name for caching data."""
return (
f"lat{self.place.coordinates['latitude']}lon{self.place.coordinates['longitude']}"
+ f"altitude{self.place.coordinates['altitude']}_{self.forecast_type}.json"
)
def _json_from_response(self) -> None:
"""Create json data from response.
Side Effects:
self.json_string
self.json
"""
if self.response.status_code == 304:
self.json["status_code"] = self.response.status_code
self.json["headers"] = dict(self.response.headers)
self.json_string = json.dumps(self.json)
else:
json_string = "{"
json_string += f'"status_code":{self.response.status_code},'
json_string += f'"headers":{json.dumps(dict(self.response.headers))},'
json_string += f'"data":{self.response.text}'
json_string += "}"
self.json_string = json_string
self.json = json.loads(json_string)
def _parse_json(self) -> None:
"""Retrieve weather data from json data.
Side Effects:
self.data
"""
json = self.json
last_modified = dt.datetime.strptime(json["headers"]["Last-Modified"], HTTP_DATETIME_FORMAT)
expires = dt.datetime.strptime(json["headers"]["Expires"], HTTP_DATETIME_FORMAT)
updated_at = dt.datetime.strptime(
json["data"]["properties"]["meta"]["updated_at"], YR_DATETIME_FORMAT
)
units = json["data"]["properties"]["meta"]["units"]
intervals = []
for timeseries in json["data"]["properties"]["timeseries"]:
start_time = dt.datetime.strptime(timeseries["time"], YR_DATETIME_FORMAT)
variables = {}
for var_name, var_value in timeseries["data"]["instant"]["details"].items():
variables[var_name] = Variable(var_name, var_value, units[var_name])
# Take the shortest time interval available.
hours = 0
if "next_1_hours" in timeseries["data"]:
hours = 1
elif "next_6_hours" in timeseries["data"]:
hours = 6
elif "next_12_hours" in timeseries["data"]:
hours = 12
end_time = start_time + dt.timedelta(hours=hours)
if hours != 0:
symbol_code = timeseries["data"][f"next_{hours}_hours"]["summary"]["symbol_code"]
for var_name, var_value in timeseries["data"][f"next_{hours}_hours"][
"details"
].items():
variables[var_name] = Variable(var_name, var_value, units[var_name])
else:
symbol_code = None
intervals.append(Interval(start_time, end_time, symbol_code, variables))
self.data = Data(last_modified, expires, updated_at, units, intervals)
def _data_outdated(self) -> bool:
return self.data.expires < dt.datetime.utcnow()
def save(self) -> None:
"""Save data to save location."""
if not self.save_location.exists():
self.save_location.mkdir(parents=True)
elif not self.save_location.is_dir():
raise NotADirectoryError(f"Expected {self.save_location} to be a directory.")
file_path = Path(self.save_location).joinpath(self.file_name)
file_path.write_text(self.json_string)
def load(self) -> None:
"""Load data from saved file."""
file_path = Path(self.save_location).joinpath(self.file_name)
self.json_string = file_path.read_text()
self.json = json.loads(self.json_string)
self._parse_json()
def update(self) -> str:
"""Update forecast data.
        Makes a request to the MET API and saves the returned data to
        'save_location'. If data already exists for the forecast, new data is
        requested only once the cached data has expired, and the request is
        sent with the appropriate 'If-Modified-Since' header.
Returns:
"Data-Not-Expired": If the data has not expired yet.
"Data-Not-Modified": If data has expired but has not been modified
yet.
"Data-Modified": If new data has been acquired.
"""
return_status = ""
if not hasattr(self, "data"):
file_path = Path(self.save_location).joinpath(self.file_name)
if file_path.exists():
self.load()
if hasattr(self, "data") and not self._data_outdated():
return_status = "Data-Not-Expired"
return return_status
self.response = requests.get(self.url, params=self.url_parameters, headers=self.url_headers)
if self.response.status_code == 304:
return_status = "Data-Not-Modified"
else:
self.response.raise_for_status()
return_status = "Data-Modified"
self._json_from_response()
self.save()
self._parse_json()
return return_status
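# A minimal usage sketch for the Forecast class above (an editorial addition,
# not part of the original module). It assumes Place, imported from
# .data_containers at the top of this file, accepts (name, latitude,
# longitude, altitude); the user-agent string below is a placeholder to be
# replaced with your own application name and contact details.
if __name__ == "__main__":
    oslo = Place("Oslo", 59.91, 10.75, altitude=10)
    forecast = Forecast(oslo, user_agent="example-app/0.1 [email protected]")
    status = forecast.update()  # "Data-Modified" on a first successful fetch
    print(status)
    print(forecast)  # pretty-printed intervals via __str__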
| 33.99
| 100
| 0.589683
|
98380f23dfcf279b597aa81500e73121c3b6a0c7
| 840
|
py
|
Python
|
.guides/tests/part9.py
|
nejohnston/processing-images
|
d45d8e351cc960703cb9f1975ae407fa71e7f205
|
[
"MIT"
] | null | null | null |
.guides/tests/part9.py
|
nejohnston/processing-images
|
d45d8e351cc960703cb9f1975ae407fa71e7f205
|
[
"MIT"
] | null | null | null |
.guides/tests/part9.py
|
nejohnston/processing-images
|
d45d8e351cc960703cb9f1975ae407fa71e7f205
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python3
"""
Assess part 9, before submission (flip)
This file is insecurely available to students, but if they find it and modify it, they
really did not need this course.
Author: Walker M. White
Date: July 31, 2018
"""
import verifier
import sys
def check_func9(file):
"""
Checks that the test cases are correct
Parameter file: The file to check
Precondition: file is a string
"""
func = 'flip'
opts = {'vertical':True}
result = verifier.grade_docstring(file,0)
if not result[0]:
result = verifier.grade_func(file,func,{},0)
if not result[0]:
result = verifier.grade_func(file,func,opts,0)
if not result[0]:
print("The 'flip' operations look correct.")
return result[0]
if __name__ == '__main__':
sys.exit(check_func9('plugins.py'))
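# Editorial note: check_func9 is called with the relative path 'plugins.py',
# so this grader must be run from the directory that contains the student's
# plugins.py. The invocation below is an assumption about the course setup:
#     python3 part9.py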
| 23.333333
| 86
| 0.661905
|