Dataset schema:
    _id               string (2–7 chars)
    title             string (1–88 chars)
    partition         string (3 classes)
    text              string (75–19.8k chars)
    language          string (1 class)
    meta_information  dict
q279100
Worker.work
test
def work(self, socket, call, args, kwargs, topics=()):
    """Calls a function and sends results to the collector.  It supports
    all function actions.  A function could return, yield, or raise any
    packable objects.
    """
    task_id = uuid4_bytes()
    reply_socket, topics = self.replier(socket, topics, call.reply_to)
    if reply_socket:
        channel = (call.call_id, task_id, topics)
    else:
        channel = (None, None, None)
    f, rpc_spec = self.find_call_target(call)
    if rpc_spec.reject_if.__get__(self.app)(call, topics):
        reply_socket and self.reject(reply_socket, call.call_id, topics)
        return
    reply_socket and self.accept(reply_socket, channel)
    success = False
    with self.catch_exceptions():
        try:
            val = self.call(call, args, kwargs, f, rpc_spec)
        except:
            exc_info = sys.exc_info()
            self.raise_(reply_socket, channel, exc_info)
            reraise(*exc_info)
        success = True
    if not success:
        # catch_exceptions() hides exceptions.
        return
    if isinstance(val, Iterator):
        vals = val
        with self.catch_exceptions():
            try:
                try:
                    val = next(vals)
                except StopIteration:
                    pass
                else:
                    self.send_reply(reply_socket, YIELD, val, *channel)
                    for val in vals:
                        self.send_reply(reply_socket, YIELD, val, *channel)
                self.send_reply(reply_socket, BREAK, None, *channel)
            except:
                exc_info = sys.exc_info()
                self.raise_(reply_socket, channel, exc_info)
                reraise(*exc_info)
    else:
        self.send_reply(reply_socket, RETURN, val, *channel)
python
{ "resource": "" }
q279101
Worker.accept
test
def accept(self, reply_socket, channel):
    """Sends ACCEPT reply."""
    info = self.info or b''
    self.send_raw(reply_socket, ACCEPT, info, *channel)
python
{ "resource": "" }
q279102
Worker.reject
test
def reject(self, reply_socket, call_id, topics=()):
    """Sends REJECT reply."""
    info = self.info or b''
    self.send_raw(reply_socket, REJECT, info, call_id, b'', topics)
python
{ "resource": "" }
q279103
Worker.raise_
test
def raise_(self, reply_socket, channel, exc_info=None):
    """Sends RAISE reply."""
    if not reply_socket:
        return
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc, tb = exc_info
    while tb.tb_next is not None:
        tb = tb.tb_next
    if issubclass(exc_type, RemoteException):
        exc_type = exc_type.exc_type
    filename, lineno = tb.tb_frame.f_code.co_filename, tb.tb_lineno
    val = (exc_type, str(exc), filename, lineno)
    try:
        state = exc.__getstate__()
    except AttributeError:
        pass
    else:
        val += (state,)
    self.send_reply(reply_socket, RAISE, val, *channel)
python
{ "resource": "" }
q279104
_Caller._call_wait
test
def _call_wait(self, hints, name, args, kwargs, topics=(), raw=False,
               limit=None, retry=False, max_retries=None):
    """Allocates a call id and emits."""
    col = self.collector
    if not col.is_running():
        col.start()
    call_id = uuid4_bytes()
    reply_to = (DUPLEX if self.socket is col.socket else col.topic)
    # Normal tuple is faster than namedtuple.
    header = self._make_header(name, call_id, reply_to, hints)
    payload = self._pack(args, kwargs, raw)

    # Use short names.
    def send_call():
        try:
            safe(send, self.socket, header, payload, topics, zmq.NOBLOCK)
        except zmq.Again:
            raise Undelivered('emission was not delivered')

    col.prepare(call_id, self, name, args, kwargs)
    send_call()
    return col.establish(call_id, self.timeout, limit,
                         send_call if retry else None,
                         max_retries=max_retries)
python
{ "resource": "" }
q279105
Collector.establish
test
def establish(self, call_id, timeout, limit=None,
              retry=None, max_retries=None):
    """Waits for the call to be accepted by workers and starts to
    collect the results.
    """
    rejected = 0
    retried = 0
    results = []
    result_queue = self.result_queues[call_id]
    try:
        with Timeout(timeout, False):
            while True:
                result = result_queue.get()
                if result is None:
                    rejected += 1
                    if retry is not None:
                        if retried == max_retries:
                            break
                        retry()
                        retried += 1
                    continue
                results.append(result)
                if len(results) == limit:
                    break
    finally:
        del result_queue
        self.remove_result_queue(call_id)
    if not results:
        if rejected:
            raise Rejected('%d workers rejected' % rejected
                           if rejected != 1 else 'A worker rejected')
        else:
            raise WorkerNotFound('failed to find worker')
    return results
python
{ "resource": "" }
q279106
Collector.dispatch_reply
test
def dispatch_reply(self, reply, value):
    """Dispatches the reply to the proper queue."""
    method = reply.method
    call_id = reply.call_id
    task_id = reply.task_id
    if method & ACK:
        try:
            result_queue = self.result_queues[call_id]
        except KeyError:
            raise KeyError('already established or unprepared call')
        if method == ACCEPT:
            worker_info = value
            result = RemoteResult(self, call_id, task_id, worker_info)
            self.results[call_id][task_id] = result
            result_queue.put_nowait(result)
        elif method == REJECT:
            result_queue.put_nowait(None)
    else:
        result = self.results[call_id][task_id]
        result.set_reply(reply.method, value)
python
{ "resource": "" }
q279107
guess_type_name
test
def guess_type_name(value):
    '''
    Guess the type name of a serialized value.
    '''
    value = str(value)

    if value.upper() in ['TRUE', 'FALSE']:
        return 'BOOLEAN'
    elif re.match(r'(-)?(\d+)(\.\d+)', value):
        return 'REAL'
    elif re.match(r'(-)?(\d+)', value):
        return 'INTEGER'
    elif re.match(r'\'((\'\')|[^\'])*\'', value):
        return 'STRING'
    elif re.match(r'\"([^\\\n]|(\\.))*?\"', value):
        return 'UNIQUE_ID'
python
{ "resource": "" }
q279108
deserialize_value
test
def deserialize_value(ty, value):
    '''
    Deserialize a value of some type.
    '''
    uty = ty.upper()

    if uty == 'BOOLEAN':
        if value.isdigit():
            return bool(int(value))
        elif value.upper() == 'FALSE':
            return False
        elif value.upper() == 'TRUE':
            return True
        else:
            return None
    elif uty == 'INTEGER':
        if '"' in value:
            return uuid.UUID(value[1:-1]).int
        else:
            return int(value)
    elif uty == 'REAL':
        return float(value)
    elif uty == 'STRING':
        return value[1:-1].replace("''", "'")
    elif uty == 'UNIQUE_ID':
        if '"' in value:
            return uuid.UUID(value[1:-1]).int
        else:
            return int(value)
python
{ "resource": "" }
q279109
ModelLoader.t_LPAREN
test
def t_LPAREN(self, t):
    r'\('
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279110
ModelLoader.t_RPAREN
test
def t_RPAREN(self, t):
    r'\)'
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279111
ElasticStore.get
test
def get(self, content_id, feature_names=None):
    '''Retrieve a feature collection.

    If a feature collection with the given id does not exist, then
    ``None`` is returned.

    :param str content_id: Content identifier.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: :class:`dossier.fc.FeatureCollection` or ``None``
    '''
    try:
        resp = self.conn.get(index=self.index, doc_type=self.type,
                             id=eid(content_id),
                             _source=self._source(feature_names))
        return self.fc_from_dict(resp['_source']['fc'])
    except NotFoundError:
        return None
    except:
        raise
python
{ "resource": "" }
q279112
ElasticStore.get_many
test
def get_many(self, content_ids, feature_names=None):
    '''Returns an iterable of feature collections.

    This efficiently retrieves multiple FCs corresponding to the
    list of ids given. Tuples of identifier and feature collection
    are yielded. If the feature collection for a given id does not
    exist, then ``None`` is returned as the second element of the
    tuple.

    :param [str] content_ids: List of content ids.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    try:
        resp = self.conn.mget(index=self.index, doc_type=self.type,
                              _source=self._source(feature_names),
                              body={'ids': map(eid, content_ids)})
    except TransportError:
        return
    for doc in resp['docs']:
        fc = None
        if doc['found']:
            fc = self.fc_from_dict(doc['_source']['fc'])
        yield did(doc['_id']), fc
python
{ "resource": "" }
q279113
ElasticStore.put
test
def put(self, items, indexes=True):
    '''Adds feature collections to the store.

    This efficiently adds multiple FCs to the store. The iterable
    of ``items`` given should yield tuples of ``(content_id, FC)``.

    :param items: Iterable of ``(content_id, FC)``.
    :param bool indexes:
        When ``True`` (the default), keyword and fulltext indexes
        are updated for each FC given.
    '''
    actions = []
    for cid, fc in items:
        # TODO: If we store features in a columnar order, then we
        # could tell ES to index the feature values directly. ---AG
        # (But is problematic because we want to preserve the ability
        # to selectively index FCs. So we'd probably need two distinct
        # doc types.)
        idxs = defaultdict(list)
        if indexes:
            for fname in self.indexed_features:
                if fname in fc:
                    idxs[fname_to_idx_name(fname)].extend(fc[fname])
            for fname in self.fulltext_indexed_features:
                if fname not in fc:
                    continue
                if isinstance(fc[fname], basestring):
                    idxs[fname_to_full_idx_name(fname)] = fc[fname]
                else:
                    idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
        actions.append({
            '_index': self.index,
            '_type': self.type,
            '_id': eid(cid),
            '_op_type': 'index',
            '_source': dict(idxs, **{
                'fc': self.fc_to_dict(fc),
            }),
        })
    bulk(self.conn, actions, timeout=60, request_timeout=60)
python
{ "resource": "" }
q279114
ElasticStore.delete
test
def delete(self, content_id):
    '''Deletes the corresponding feature collection.

    If the FC does not exist, then this is a no-op.
    '''
    try:
        self.conn.delete(index=self.index, doc_type=self.type,
                         id=eid(content_id))
    except NotFoundError:
        pass
python
{ "resource": "" }
q279115
ElasticStore.delete_all
test
def delete_all(self):
    '''Deletes all feature collections.

    This does not destroy the ES index, but instead only deletes all
    FCs with the configured document type (defaults to ``fc``).
    '''
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type)
    except TransportError:
        logger.warn('type %r in index %r already deleted',
                    self.type, self.index, exc_info=True)
python
{ "resource": "" }
q279116
ElasticStore.delete_index
test
def delete_index(self):
    '''Deletes the underlying ES index.

    Only use this if you know what you're doing. This destroys the
    entire underlying ES index, which could be shared by multiple
    distinct ElasticStore instances.
    '''
    if self.conn.indices.exists(index=self.index):
        self.conn.indices.delete(index=self.index)
python
{ "resource": "" }
q279117
ElasticStore.scan
test
def scan(self, *key_ranges, **kwargs):
    '''Scan for FCs in the given id ranges.

    :param key_ranges:
        ``key_ranges`` should be a list of pairs of ranges. The
        first value is the lower bound id and the second value is
        the upper bound id. Use ``()`` in either position to leave
        it unbounded. If no ``key_ranges`` are given, then all FCs
        in the store are returned.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    for hit in self._scan(*key_ranges, **kwargs):
        yield did(hit['_id']), self.fc_from_dict(hit['_source']['fc'])
python
{ "resource": "" }
q279118
ElasticStore.scan_ids
test
def scan_ids(self, *key_ranges, **kwargs):
    '''Scan for ids only in the given id ranges.

    :param key_ranges:
        ``key_ranges`` should be a list of pairs of ranges. The
        first value is the lower bound id and the second value is
        the upper bound id. Use ``()`` in either position to leave
        it unbounded. If no ``key_ranges`` are given, then the ids
        of all FCs in the store are returned.
    :rtype: Iterable of ``content_id``
    '''
    kwargs['feature_names'] = False
    for hit in self._scan(*key_ranges, **kwargs):
        yield did(hit['_id'])
python
{ "resource": "" }
q279119
ElasticStore.scan_prefix
test
def scan_prefix(self, prefix, feature_names=None):
    '''Scan for FCs with a given prefix.

    :param str prefix: Identifier prefix.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    resp = self._scan_prefix(prefix, feature_names=feature_names)
    for hit in resp:
        yield did(hit['_id']), self.fc_from_dict(hit['_source']['fc'])
python
{ "resource": "" }
q279120
ElasticStore.scan_prefix_ids
test
def scan_prefix_ids(self, prefix):
    '''Scan for ids with a given prefix.

    :param str prefix: Identifier prefix.
    :rtype: Iterable of ``content_id``
    '''
    resp = self._scan_prefix(prefix, feature_names=False)
    for hit in resp:
        yield did(hit['_id'])
python
{ "resource": "" }
q279121
ElasticStore.fulltext_scan
test
def fulltext_scan(self, query_id=None, query_fc=None, feature_names=None,
                  preserve_order=True, indexes=None):
    '''Fulltext search.

    Yields an iterable of triples (score, identifier, FC)
    corresponding to the results of the fulltext search for the
    query given. At least one of ``query_id`` or ``query_fc`` must
    be provided; if ``query_fc`` is ``None``, then the query is
    retrieved automatically corresponding to ``query_id``.

    Note that, unless ``preserve_order`` is set to True, the
    ``score`` will always be 0.0, and the results will be
    unordered. ``preserve_order`` set to True will cause the
    results to be scored and be ordered by score, but you should
    expect to see a decrease in performance.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(score, content_id, FC)``
    '''
    it = self._fulltext_scan(query_id, query_fc,
                             feature_names=feature_names,
                             preserve_order=preserve_order,
                             indexes=indexes)
    for hit in it:
        fc = self.fc_from_dict(hit['_source']['fc'])
        yield hit['_score'], did(hit['_id']), fc
python
{ "resource": "" }
q279122
ElasticStore.fulltext_scan_ids
test
def fulltext_scan_ids(self, query_id=None, query_fc=None,
                      preserve_order=True, indexes=None):
    '''Fulltext search for identifiers.

    Yields an iterable of pairs (score, identifier) corresponding
    to the results of the fulltext search for the query given. At
    least one of ``query_id`` or ``query_fc`` must be provided; if
    ``query_fc`` is ``None``, then the query is retrieved
    automatically corresponding to ``query_id``.

    Note that, unless ``preserve_order`` is set to True, the
    ``score`` will always be 0.0, and the results will be
    unordered. ``preserve_order`` set to True will cause the
    results to be scored and be ordered by score, but you should
    expect to see a decrease in performance.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :rtype: Iterable of ``(score, content_id)``
    '''
    it = self._fulltext_scan(query_id, query_fc,
                             feature_names=False,
                             preserve_order=preserve_order,
                             indexes=indexes)
    for hit in it:
        yield hit['_score'], did(hit['_id'])
python
{ "resource": "" }
q279123
ElasticStore.keyword_scan
test
def keyword_scan(self, query_id=None, query_fc=None, feature_names=None):
    '''Keyword scan for feature collections.

    This performs a keyword scan using the query given. A keyword
    scan searches for FCs with terms in each of the query's indexed
    fields.

    At least one of ``query_id`` or ``query_fc`` must be provided.
    If ``query_fc`` is ``None``, then the query is retrieved
    automatically corresponding to ``query_id``.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    it = self._keyword_scan(query_id, query_fc,
                            feature_names=feature_names)
    for hit in it:
        fc = self.fc_from_dict(hit['_source']['fc'])
        yield did(hit['_id']), fc
python
{ "resource": "" }
q279124
ElasticStore.keyword_scan_ids
test
def keyword_scan_ids(self, query_id=None, query_fc=None):
    '''Keyword scan for ids.

    This performs a keyword scan using the query given. A keyword
    scan searches for FCs with terms in each of the query's indexed
    fields.

    At least one of ``query_id`` or ``query_fc`` must be provided.
    If ``query_fc`` is ``None``, then the query is retrieved
    automatically corresponding to ``query_id``.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :rtype: Iterable of ``content_id``
    '''
    it = self._keyword_scan(query_id, query_fc, feature_names=False)
    for hit in it:
        yield did(hit['_id'])
python
{ "resource": "" }
q279125
ElasticStore.index_scan_ids
test
def index_scan_ids(self, fname, val):
    '''Low-level keyword index scan for ids.

    Retrieves identifiers of FCs that have a feature value ``val``
    in the feature named ``fname``. Note that ``fname`` must be
    indexed.

    :param str fname: Feature name.
    :param str val: Feature value.
    :rtype: Iterable of ``content_id``
    '''
    disj = []
    for fname2 in self.indexes[fname]['feature_names']:
        disj.append({'term': {fname_to_idx_name(fname2): val}})
    query = {
        'constant_score': {
            'filter': {'or': disj},
        },
    }
    hits = scan(self.conn, index=self.index, doc_type=self.type,
                query={
                    '_source': False,
                    'query': query,
                })
    for hit in hits:
        yield did(hit['_id'])
python
{ "resource": "" }
q279126
ElasticStore._source
test
def _source(self, feature_names):
    '''Maps feature names to ES's "_source" field.'''
    if feature_names is None:
        return True
    elif isinstance(feature_names, bool):
        return feature_names
    else:
        return map(lambda n: 'fc.' + n, feature_names)
python
{ "resource": "" }
q279127
ElasticStore._range_filters
test
def _range_filters(self, *key_ranges):
    'Creates ES filters for key ranges used in scanning.'
    filters = []
    for s, e in key_ranges:
        if isinstance(s, basestring):
            s = eid(s)
        if isinstance(e, basestring):
            # Make the range inclusive.
            # We need a valid codepoint, so use the max.
            e += u'\U0010FFFF'
            e = eid(e)
        if s == () and e == ():
            filters.append({'match_all': {}})
        elif e == ():
            filters.append({'range': {'_id': {'gte': s}}})
        elif s == ():
            filters.append({'range': {'_id': {'lte': e}}})
        else:
            filters.append({'range': {'_id': {'gte': s, 'lte': e}}})
    if len(filters) == 0:
        return [{'match_all': {}}]
    else:
        return filters
python
{ "resource": "" }
q279128
ElasticStore._create_index
test
def _create_index(self):
    'Create the index.'
    try:
        self.conn.indices.create(
            index=self.index, timeout=60, request_timeout=60,
            body={
                'settings': {
                    'number_of_shards': self.shards,
                    'number_of_replicas': self.replicas,
                },
            })
    except TransportError:
        # Hope that this is an "index already exists" error...
        logger.warn('index already exists? OK', exc_info=True)
python
{ "resource": "" }
q279129
ElasticStore._create_mappings
test
def _create_mappings(self):
    'Create the field type mapping.'
    self.conn.indices.put_mapping(
        index=self.index, doc_type=self.type,
        timeout=60, request_timeout=60,
        body={
            self.type: {
                'dynamic_templates': [{
                    'default_no_analyze_fc': {
                        'match': 'fc.*',
                        'mapping': {'index': 'no'},
                    },
                }],
                '_all': {
                    'enabled': False,
                },
                '_id': {
                    'index': 'not_analyzed',  # allows range queries
                },
                'properties': self._get_index_mappings(),
            },
        })
    # It is possible to create an index and quickly launch a request
    # that will fail because the index hasn't been set up yet. Usually,
    # you'll get a "no active shards available" error.
    #
    # Since index creation is a very rare operation (it only happens
    # when the index doesn't already exist), we sit and wait for the
    # cluster to become healthy.
    self.conn.cluster.health(index=self.index, wait_for_status='yellow')
python
{ "resource": "" }
q279130
ElasticStore._get_index_mappings
test
def _get_index_mappings(self):
    'Build the field mappings for indexed features.'
    maps = {}
    for fname in self.indexed_features:
        config = self.indexes.get(fname, {})
        maps[fname_to_idx_name(fname)] = {
            'type': config.get('es_index_type', 'integer'),
            'store': False,
            'index': 'not_analyzed',
        }
    for fname in self.fulltext_indexed_features:
        maps[fname_to_full_idx_name(fname)] = {
            'type': 'string',
            'store': False,
            'index': 'analyzed',
        }
    return maps
python
{ "resource": "" }
q279131
ElasticStore._get_field_types
test
def _get_field_types(self):
    'Retrieve the field types. Useful for debugging.'
    mapping = self.conn.indices.get_mapping(
        index=self.index, doc_type=self.type)
    return mapping[self.index]['mappings'][self.type]['properties']
python
{ "resource": "" }
q279132
ElasticStore._fc_index_disjunction_from_query
test
def _fc_index_disjunction_from_query(self, query_fc, fname):
    'Creates a disjunction for keyword scan queries.'
    if len(query_fc.get(fname, [])) == 0:
        return []
    terms = query_fc[fname].keys()

    disj = []
    for fname2 in self.indexes[fname]['feature_names']:
        disj.append({'terms': {fname_to_idx_name(fname2): terms}})
    return disj
python
{ "resource": "" }
q279133
ElasticStore.fc_bytes
test
def fc_bytes(self, fc_dict):
    '''Take a feature collection in dict form and count its size
    in bytes.
    '''
    num_bytes = 0
    for _, feat in fc_dict.iteritems():
        num_bytes += len(feat)
    return num_bytes
python
{ "resource": "" }
q279134
ElasticStore.count_bytes
test
def count_bytes(self, filter_preds):
    '''Count bytes of all feature collections whose key satisfies
    one of the predicates in ``filter_preds``. The byte counts are
    binned by filter predicate.
    '''
    num_bytes = defaultdict(int)
    for hit in self._scan():
        for filter_pred in filter_preds:
            if filter_pred(did(hit['_id'])):
                num_bytes[filter_pred] += self.fc_bytes(
                    hit['_source']['fc'])
    return num_bytes
python
{ "resource": "" }
q279135
pretty_string
test
def pretty_string(fc):
    '''construct a nice looking string for an FC
    '''
    s = []
    for fname, feature in sorted(fc.items()):
        if isinstance(feature, StringCounter):
            feature = [u'%s: %d' % (k, v)
                       for (k, v) in feature.most_common()]
            feature = u'\n\t' + u'\n\t'.join(feature)
        s.append(fname + u': ' + feature)
    return u'\n'.join(s)
python
{ "resource": "" }
q279136
process_docopts
test
def process_docopts():
    # type: ()->None
    """
    Take care of command line options
    """
    arguments = docopt(__doc__,
                       version="Find Known Secrets {0}".format(__version__))
    logger.debug(arguments)
    # print(arguments)
    if arguments["here"]:
        # all default
        go()
    else:
        # user config
        files = arguments["--secrets"]
        searcher = Searcher(source=arguments["--source"], files=files)
        searcher.go()
python
{ "resource": "" }
q279137
default_formatter
test
def default_formatter(error):
    """Escape the error, and wrap it in a span with class
    ``error-message``"""
    quoted = formencode.htmlfill.escape_formatter(error)
    return u'<span class="error-message">{0}</span>'.format(quoted)
python
{ "resource": "" }
q279138
pretty_to_link
test
def pretty_to_link(inst, link):
    '''
    Create a human-readable representation of a link on the 'TO'-side.
    '''
    values = ''
    prefix = ''
    metaclass = xtuml.get_metaclass(inst)
    for name, ty in metaclass.attributes:
        if name in link.key_map:
            value = getattr(inst, name)
            value = xtuml.serialize_value(value, ty)
            name = link.key_map[name]
            values += '%s%s=%s' % (prefix, name, value)
            prefix = ', '
    return '%s(%s)' % (link.kind, values)
python
{ "resource": "" }
q279139
pretty_unique_identifier
test
def pretty_unique_identifier(inst, identifier):
    '''
    Create a human-readable representation of a unique identifier.
    '''
    values = ''
    prefix = ''
    metaclass = xtuml.get_metaclass(inst)
    for name, ty in metaclass.attributes:
        if name in metaclass.identifying_attributes:
            value = getattr(inst, name)
            value = xtuml.serialize_value(value, ty)
            values += '%s%s=%s' % (prefix, name, value)
            prefix = ', '
    return '%s(%s)' % (identifier, values)
python
{ "resource": "" }
q279140
check_uniqueness_constraint
test
def check_uniqueness_constraint(m, kind=None):
    '''
    Check the model for uniqueness constraint violations.
    '''
    if kind is None:
        metaclasses = m.metaclasses.values()
    else:
        metaclasses = [m.find_metaclass(kind)]

    res = 0
    for metaclass in metaclasses:
        id_map = dict()
        for identifier in metaclass.indices:
            id_map[identifier] = dict()

        for inst in metaclass.select_many():
            # Check for null-values
            for name, ty in metaclass.attributes:
                if name not in metaclass.identifying_attributes:
                    continue
                value = getattr(inst, name)
                isnull = value is None
                isnull |= (ty == 'UNIQUE_ID' and not value)
                if isnull:
                    res += 1
                    logger.warning('%s.%s is part of an identifier and is null'
                                   % (metaclass.kind, name))
            # Check uniqueness
            for identifier in metaclass.indices:
                kwargs = dict()
                for name in metaclass.indices[identifier]:
                    kwargs[name] = getattr(inst, name)
                index_key = frozenset(kwargs.items())
                if index_key in id_map[identifier]:
                    res += 1
                    id_string = pretty_unique_identifier(inst, identifier)
                    logger.warning('uniqueness constraint violation in %s, %s'
                                   % (metaclass.kind, id_string))
                id_map[identifier][index_key] = inst
    return res
python
{ "resource": "" }
q279141
check_link_integrity
test
def check_link_integrity(m, link):
    '''
    Check the model for integrity violations on an association in a
    particular direction.
    '''
    res = 0
    for inst in link.from_metaclass.select_many():
        q_set = list(link.navigate(inst))
        if (len(q_set) < 1 and not link.conditional) or \
           (len(q_set) > 1 and not link.many):
            res += 1
            logger.warning('integrity violation in '
                           '%s --(%s)--> %s' % (pretty_from_link(inst, link),
                                                link.rel_id,
                                                pretty_to_link(inst, link)))
    return res
python
{ "resource": "" }
q279142
check_subtype_integrity
test
def check_subtype_integrity(m, super_kind, rel_id):
    '''
    Check the model for integrity violations across a subtype
    association.
    '''
    if isinstance(rel_id, int):
        rel_id = 'R%d' % rel_id

    res = 0
    for inst in m.select_many(super_kind):
        if not xtuml.navigate_subtype(inst, rel_id):
            res += 1
            logger.warning('integrity violation across '
                           '%s[%s]' % (super_kind, rel_id))
    return res
python
{ "resource": "" }
q279143
feature_index
test
def feature_index(*feature_names):
    '''Returns an index creation function.

    Returns a valid index ``create`` function for the feature names
    given. This can be used with the :meth:`Store.define_index`
    method to create indexes on any combination of features in a
    feature collection.

    :type feature_names: list(unicode)
    :rtype: ``(val -> index val)
            -> (content_id, FeatureCollection)
            -> generator of [index val]``
    '''
    def _(trans, (cid, fc)):
        for fname in feature_names:
            feat = fc.get(fname)
            if feat is None:
                continue
            elif isinstance(feat, unicode):
                yield trans(feat)
            else:
                # string counter, sparse/dense vector
                for val in feat.iterkeys():
                    yield trans(val)
    return _
python
{ "resource": "" }
q279144
basic_transform
test
def basic_transform(val):
    '''A basic transform for strings and integers.'''
    if isinstance(val, int):
        return struct.pack('>i', val)
    else:
        return safe_lower_utf8(val)
python
{ "resource": "" }
q279145
Store.put
test
def put(self, items, indexes=True):
    '''Add feature collections to the store.

    Given an iterable of tuples of the form
    ``(content_id, feature collection)``, add each to the store and
    overwrite any that already exist.

    This method optionally accepts a keyword argument `indexes`,
    which by default is set to ``True``. When it is ``True``, it
    will *create* new indexes for each content object for all
    indexes defined on this store.

    Note that this will not update existing indexes. (There is
    currently no way to do this without running some sort of
    garbage collection process.)

    :param iterable items:
        iterable of ``(content_id, FeatureCollection)``.
    :type fc: :class:`dossier.fc.FeatureCollection`
    '''
    # So why accept an iterable? Ideally, some day, `kvlayer.put`
    # would accept an iterable, so we should too.
    #
    # But we have to transform it to a list in order to update
    # indexes anyway. Well, if we don't have to update indexes, then
    # we can avoid loading everything into memory, which seems like
    # an optimization worth having even if it's only some of the time.
    #
    # N.B. If you're thinking, "Just use itertools.tee", then you
    # should heed this warning from Python docs: "This itertool may
    # require significant auxiliary storage (depending on how much
    # temporary data needs to be stored). In general, if one iterator
    # uses most or all of the data before another iterator starts, it
    # is faster to use list() instead of tee()."
    #
    # i.e., `tee` has to store everything into memory because
    # `kvlayer` will exhaust the first iterator before indexes get
    # updated.
    items = list(items)
    self.kvl.put(self.TABLE,
                 *imap(lambda (cid, fc): ((cid,), fc_dumps(fc)), items))
    if indexes:
        for idx_name in self._indexes:
            self._index_put(idx_name, *items)
python
{ "resource": "" }
q279146
Store.delete_all
test
def delete_all(self):
    '''Deletes all storage.

    This includes every content object and all index data.
    '''
    self.kvl.clear_table(self.TABLE)
    self.kvl.clear_table(self.INDEX_TABLE)
python
{ "resource": "" }
q279147
Store.scan
test
def scan(self, *key_ranges):
    '''Retrieve feature collections in a range of ids.

    Returns a generator of content objects corresponding to the
    content identifier ranges given. `key_ranges` can be a possibly
    empty list of 2-tuples, where the first element of the tuple is
    the beginning of a range and the second element is the end of a
    range. To specify the beginning or end of the table, use an
    empty tuple `()`.

    If the list is empty, then this yields all content objects in
    the storage.

    :param key_ranges: as described in
                       :meth:`kvlayer._abstract_storage.AbstractStorage`
    :rtype: generator of
            (``content_id``, :class:`dossier.fc.FeatureCollection`).
    '''
    # (id, id) -> ((id,), (id,))
    key_ranges = [(tuplify(s), tuplify(e)) for s, e in key_ranges]
    return imap(lambda (cid, fc): (cid[0], fc_loads(fc)),
                self.kvl.scan(self.TABLE, *key_ranges))
python
{ "resource": "" }
q279148
Store.scan_ids
test
def scan_ids(self, *key_ranges):
    '''Retrieve content ids in a range of ids.

    Returns a generator of ``content_id`` corresponding to the
    content identifier ranges given. `key_ranges` can be a possibly
    empty list of 2-tuples, where the first element of the tuple is
    the beginning of a range and the second element is the end of a
    range. To specify the beginning or end of the table, use an
    empty tuple `()`.

    If the list is empty, then this yields all content ids in the
    storage.

    :param key_ranges: as described in
                       :meth:`kvlayer._abstract_storage.AbstractStorage`
    :rtype: generator of ``content_id``
    '''
    # (id, id) -> ((id,), (id,))
    key_ranges = [(tuplify(s), tuplify(e)) for s, e in key_ranges]
    scanner = self.kvl.scan_keys(self.TABLE, *key_ranges)
    return imap(itemgetter(0), scanner)
python
{ "resource": "" }
q279149
Store.index_scan
test
def index_scan(self, idx_name, val):
    '''Returns ids that match an indexed value.

    Returns a generator of content identifiers that have an entry
    in the index ``idx_name`` with value ``val`` (after index
    transforms are applied).

    If the index named by ``idx_name`` is not registered, then a
    :exc:`~exceptions.KeyError` is raised.

    :param unicode idx_name: name of index
    :param val: the value to use to search the index
    :type val: unspecified (depends on the index, usually ``unicode``)
    :rtype: generator of ``content_id``
    :raises: :exc:`~exceptions.KeyError`
    '''
    idx = self._index(idx_name)['transform']
    key = (idx(val), idx_name.encode('utf-8'))
    keys = self.kvl.scan_keys(self.INDEX_TABLE, (key, key))
    return imap(lambda k: k[2], keys)
python
{ "resource": "" }
q279150
Store.index_scan_prefix
test
def index_scan_prefix(self, idx_name, val_prefix):
    '''Returns ids that match a prefix of an indexed value.

    Returns a generator of content identifiers that have an entry
    in the index ``idx_name`` with prefix ``val_prefix`` (after
    index transforms are applied).

    If the index named by ``idx_name`` is not registered, then a
    :exc:`~exceptions.KeyError` is raised.

    :param unicode idx_name: name of index
    :param val_prefix: the value to use to search the index
    :type val_prefix: unspecified (depends on the index, usually
                      ``unicode``)
    :rtype: generator of ``content_id``
    :raises: :exc:`~exceptions.KeyError`
    '''
    return self._index_scan_prefix_impl(
        idx_name, val_prefix, lambda k: k[2])
python
{ "resource": "" }
q279151
Store.index_scan_prefix_and_return_key
test
def index_scan_prefix_and_return_key(self, idx_name, val_prefix):
    '''Returns ids that match a prefix of an indexed value, and the
    specific key that matched the search prefix.

    Returns a generator of (index key, content identifier) that
    have an entry in the index ``idx_name`` with prefix
    ``val_prefix`` (after index transforms are applied).

    If the index named by ``idx_name`` is not registered, then a
    :exc:`~exceptions.KeyError` is raised.

    :param unicode idx_name: name of index
    :param val_prefix: the value to use to search the index
    :type val_prefix: unspecified (depends on the index, usually
                      ``unicode``)
    :rtype: generator of (``index key``, ``content_id``)
    :raises: :exc:`~exceptions.KeyError`
    '''
    return self._index_scan_prefix_impl(
        idx_name, val_prefix, lambda k: (k[0], k[2]))
python
{ "resource": "" }
q279152
Store._index_scan_prefix_impl
test
def _index_scan_prefix_impl(self, idx_name, val_prefix, retfunc):
    '''Implementation for index_scan_prefix and
    index_scan_prefix_and_return_key, parameterized on return value
    function. retfunc gets passed a key tuple from the index:
    (index value, index name, content_id)
    '''
    idx = self._index(idx_name)['transform']
    val_prefix = idx(val_prefix)
    idx_name = idx_name.encode('utf-8')
    s = (val_prefix, idx_name)
    e = (val_prefix + '\xff', idx_name)
    keys = self.kvl.scan_keys(self.INDEX_TABLE, (s, e))
    return imap(retfunc, keys)
python
{ "resource": "" }
q279153
Store.define_index
test
def define_index(self, idx_name, create, transform):
    '''Add an index to this store instance.

    Adds an index transform to the current FC store. Once an index
    with name ``idx_name`` is added, it will be available in all
    ``index_*`` methods. Additionally, the index will be
    automatically updated on calls to
    :meth:`~dossier.fc.store.Store.put`.

    If an index with name ``idx_name`` already exists, then it is
    overwritten.

    Note that indexes do *not* persist. They must be re-defined for
    each instance of :class:`Store`.

    For example, to add an index on the ``boNAME`` feature, you can
    use the ``feature_index`` helper function:

    .. code-block:: python

        store.define_index('boNAME',
                           feature_index('boNAME'),
                           lambda s: s.encode('utf-8'))

    Another example for creating an index on names:

    .. code-block:: python

        store.define_index('NAME',
                           feature_index('canonical_name', 'NAME'),
                           lambda s: s.lower().encode('utf-8'))

    :param idx_name: The name of the index. Must be UTF-8 encodable.
    :type idx_name: unicode
    :param create: A function that accepts the ``transform`` function
                   and a pair of ``(content_id, fc)`` and produces a
                   generator of index values from the pair given
                   using ``transform``.
    :param transform: A function that accepts an arbitrary value and
                      applies a transform to it. This transforms the
                      *stored* value to the *index* value. This
                      *must* produce a value with type `str`
                      (or `bytes`).
    '''
    assert isinstance(idx_name, (str, unicode))
    idx_name = idx_name.decode('utf-8')
    self._indexes[idx_name] = {'create': create, 'transform': transform}
python
{ "resource": "" }
q279154
Store._index_put
test
def _index_put(self, idx_name, *ids_and_fcs):
    '''Add new index values.

    Adds new index values for index ``idx_name`` for the pairs
    given. Each pair should be a content identifier and a
    :class:`dossier.fc.FeatureCollection`.

    :type idx_name: unicode
    :type ids_and_fcs: ``[(content_id, FeatureCollection)]``
    '''
    keys = self._index_keys_for(idx_name, *ids_and_fcs)
    with_vals = map(lambda k: (k, '0'), keys)
    # TODO: use imap when kvl.put takes an iterable
    self.kvl.put(self.INDEX_TABLE, *with_vals)
python
{ "resource": "" }
q279155
Store._index_put_raw
test
def _index_put_raw(self, idx_name, content_id, val):
    '''Add new raw index values.

    Adds a new index key corresponding to
    ``(transform(val), idx_name, content_id)``. This method bypasses
    the *creation* of indexes from content objects, but values are
    still transformed.

    :type idx_name: unicode
    :type content_id: str
    :type val: unspecified (depends on the index, usually ``unicode``)
    '''
    idx = self._index(idx_name)['transform']
    key = (idx(val), idx_name.encode('utf-8'), content_id)
    self.kvl.put(self.INDEX_TABLE, (key, '0'))
python
{ "resource": "" }
q279156
Store._index_keys_for
test
def _index_keys_for(self, idx_name, *ids_and_fcs):
    '''Returns a generator of index triples.

    Returns a generator of index keys for the ``ids_and_fcs`` pairs
    given. The index keys have the form
    ``(idx_val, idx_name, content_id)``.

    :type idx_name: unicode
    :type ids_and_fcs: ``[(content_id, FeatureCollection)]``
    :rtype: generator of ``(str, str, str)``
    '''
    idx = self._index(idx_name)
    icreate, itrans = idx['create'], idx['transform']
    if isinstance(idx_name, unicode):
        idx_name = idx_name.encode('utf-8')
    for cid_fc in ids_and_fcs:
        content_id = cid_fc[0]
        # Be sure to dedup index_values or else we may
        # suffer duplicate_pkey errors down the line.
        seen_values = set()
        for index_value in icreate(itrans, cid_fc):
            if index_value and index_value not in seen_values:
                yield (index_value, idx_name, content_id)
                seen_values.add(index_value)
python
{ "resource": "" }
q279157
Store._index
test
def _index(self, name):
    '''Returns index transforms for ``name``.

    :type name: unicode
    :rtype: ``{ create |--> function, transform |--> function }``
    '''
    name = name.decode('utf-8')
    try:
        return self._indexes[name]
    except KeyError:
        raise KeyError('Index "%s" has not been registered with '
                       'this FC store.' % name)
python
{ "resource": "" }
q279158
check_pypi_name
test
def check_pypi_name(pypi_package_name, pypi_registry_host=None):
    """
    Check if a package name exists on pypi.

    TODO: Document the Registry URL construction.
    It may not be obvious how pypi_package_name and pypi_registry_host
    are used. I'm appending the simple HTTP API parts of the registry
    standard specification.

    It will return True if the package name, or any equivalent variation
    as defined by PEP 503 normalisation rules
    (https://www.python.org/dev/peps/pep-0503/#normalized-names) is
    registered in the PyPI registry.

    >>> check_pypi_name('pip')
    True
    >>> check_pypi_name('Pip')
    True

    It will return False if the package name, or any equivalent variation
    as defined by PEP 503 normalisation rules
    (https://www.python.org/dev/peps/pep-0503/#normalized-names) is not
    registered in the PyPI registry.

    >>> check_pypi_name('testy_mc-test_case-has.a.cousin_who_should_never_write_a_package')
    False

    :param pypi_package_name:
    :param pypi_registry_host:
    :return:
    """
    if pypi_registry_host is None:
        pypi_registry_host = 'pypi.python.org'

    # Just a helpful reminder why this bytearray size was chosen.
    # HTTP/1.1 200 OK
    # HTTP/1.1 404 Not Found
    receive_buffer = bytearray(b'------------')

    context = ssl.create_default_context()
    ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET),
                                          server_hostname=pypi_registry_host)
    ssl_http_socket.connect((pypi_registry_host, 443))
    ssl_http_socket.send(b''.join([
        b"HEAD /simple/", pypi_package_name.encode('ascii'), b"/ HTTP/1.0", b"\r\n",
        b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
        b"\r\n\r\n"
    ]))
    ssl_http_socket.recv_into(receive_buffer)

    # Early return when possible.
    if b'HTTP/1.1 200' in receive_buffer:
        ssl_http_socket.shutdown(1)
        ssl_http_socket.close()
        return True
    elif b'HTTP/1.1 404' in receive_buffer:
        ssl_http_socket.shutdown(1)
        ssl_http_socket.close()
        return False

    remaining_bytes = ssl_http_socket.recv(2048)
    redirect_path_location_start = remaining_bytes.find(b'Location:') + 10
    redirect_path_location_end = remaining_bytes.find(
        b'\r\n', redirect_path_location_start)
    # Append the trailing slash to avoid a needless extra redirect.
    redirect_path = remaining_bytes[
        redirect_path_location_start:redirect_path_location_end] + b'/'

    ssl_http_socket.shutdown(1)
    ssl_http_socket.close()

    # Reset the bytearray to empty
    # receive_buffer = bytearray(b'------------')
    ssl_http_socket = context.wrap_socket(socket.socket(socket.AF_INET),
                                          server_hostname=pypi_registry_host)
    ssl_http_socket.connect((pypi_registry_host, 443))
    ssl_http_socket.send(b''.join([
        b"HEAD ", redirect_path, b" HTTP/1.0", b"\r\n",
        b"Host: ", pypi_registry_host.encode('ascii'), b"\r\n",
        b"\r\n\r\n"]))
    ssl_http_socket.recv_into(receive_buffer)

    if b'HTTP/1.1 200' in receive_buffer:
        return True
    elif b'HTTP/1.1 404' in receive_buffer:
        return False
    else:
        raise NotImplementedError('A definitive answer was not found '
                                  'by primary or secondary lookups.')
python
{ "resource": "" }
q279159
add_direction
test
def add_direction(value, arg=u"rtl_only"):
    """Adds direction to the element

    :arguments:
        arg
            * rtl_only: Add the direction only in case of a
              right-to-left language (default)
            * both: add the direction in both cases
            * ltr_only: Add the direction only in case of a
              left-to-right language

    {{image_name|add_direction}} when image_name is 'start_arrow.png'
    results in 'start_arrow_rtl.png' in case of RTL language, and
    'start_arrow.png' or 'start_arrow_ltr.png' depending on `arg` value.
    """
    if arg == u'rtl_only':
        directions = (u'', u'_rtl')
    elif arg == u'both':
        directions = (u'_ltr', u'_rtl')
    elif arg == u'ltr_only':
        directions = (u'_ltr', u'')
    else:
        raise template.TemplateSyntaxError(
            'add_direction can use arg with one of '
            '["rtl_only", "both", "ltr_only"]')

    parts = value.rsplit('.', 1)
    if not len(parts):
        return value
    elif len(parts) == 1:
        return value + directions[translation.get_language_bidi()]
    else:
        return '.'.join((parts[0] + directions[translation.get_language_bidi()],
                         parts[1]))
python
{ "resource": "" }
q279160
get_type_name
test
def get_type_name(s_dt):
    '''
    get the xsd name of a S_DT
    '''
    s_cdt = nav_one(s_dt).S_CDT[17]()
    if s_cdt and s_cdt.Core_Typ in range(1, 6):
        return s_dt.Name

    s_edt = nav_one(s_dt).S_EDT[17]()
    if s_edt:
        return s_dt.Name

    s_udt = nav_one(s_dt).S_UDT[17]()
    if s_udt:
        return s_dt.Name
python
{ "resource": "" }
q279161
get_refered_attribute
test
def get_refered_attribute(o_attr):
    '''
    Get the referred attribute.
    '''
    o_attr_ref = nav_one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]()
    if o_attr_ref:
        return get_refered_attribute(o_attr_ref)
    else:
        return o_attr
python
{ "resource": "" }
q279162
build_core_type
test
def build_core_type(s_cdt):
    '''
    Build an xsd simpleType out of a S_CDT.
    '''
    s_dt = nav_one(s_cdt).S_DT[17]()
    if s_dt.name == 'void':
        type_name = None
    elif s_dt.name == 'boolean':
        type_name = 'xs:boolean'
    elif s_dt.name == 'integer':
        type_name = 'xs:integer'
    elif s_dt.name == 'real':
        type_name = 'xs:decimal'
    elif s_dt.name == 'string':
        type_name = 'xs:string'
    elif s_dt.name == 'unique_id':
        type_name = 'xs:integer'
    else:
        type_name = None

    if type_name:
        mapped_type = ET.Element('xs:simpleType', name=s_dt.name)
        ET.SubElement(mapped_type, 'xs:restriction', base=type_name)
        return mapped_type
python
{ "resource": "" }
q279163
build_enum_type
test
def build_enum_type(s_edt):
    '''
    Build an xsd simpleType out of a S_EDT.
    '''
    s_dt = nav_one(s_edt).S_DT[17]()
    enum = ET.Element('xs:simpleType', name=s_dt.name)
    enum_list = ET.SubElement(enum, 'xs:restriction', base='xs:string')

    first_filter = lambda selected: not nav_one(selected).S_ENUM[56, 'succeeds']()
    s_enum = nav_any(s_edt).S_ENUM[27](first_filter)
    while s_enum:
        ET.SubElement(enum_list, 'xs:enumeration', value=s_enum.name)
        s_enum = nav_one(s_enum).S_ENUM[56, 'precedes']()

    return enum
python
{ "resource": "" }
q279164
build_struct_type
test
def build_struct_type(s_sdt):
    '''
    Build an xsd complexType out of a S_SDT.
    '''
    s_dt = nav_one(s_sdt).S_DT[17]()
    struct = ET.Element('xs:complexType', name=s_dt.name)

    first_filter = lambda selected: not nav_one(selected).S_MBR[46, 'succeeds']()
    s_mbr = nav_any(s_sdt).S_MBR[44](first_filter)
    while s_mbr:
        s_dt = nav_one(s_mbr).S_DT[45]()
        type_name = get_type_name(s_dt)
        ET.SubElement(struct, 'xs:attribute', name=s_mbr.name, type=type_name)
        s_mbr = nav_one(s_mbr).S_MBR[46, 'precedes']()

    return struct
python
{ "resource": "" }
q279165
build_user_type
test
def build_user_type(s_udt):
    '''
    Build an xsd simpleType out of a S_UDT.
    '''
    s_dt_user = nav_one(s_udt).S_DT[17]()
    s_dt_base = nav_one(s_udt).S_DT[18]()
    base_name = get_type_name(s_dt_base)
    if base_name:
        user = ET.Element('xs:simpleType', name=s_dt_user.name)
        ET.SubElement(user, 'xs:restriction', base=base_name)
        return user
python
{ "resource": "" }
q279166
build_type
test
def build_type(s_dt):
    '''
    Build a partial xsd tree out of a S_DT and its sub types
    S_CDT, S_EDT, S_SDT and S_UDT.
    '''
    s_cdt = nav_one(s_dt).S_CDT[17]()
    if s_cdt:
        return build_core_type(s_cdt)

    s_edt = nav_one(s_dt).S_EDT[17]()
    if s_edt:
        return build_enum_type(s_edt)

    s_udt = nav_one(s_dt).S_UDT[17]()
    if s_udt:
        return build_user_type(s_udt)
python
{ "resource": "" }
q279167
build_class
test
def build_class(o_obj):
    '''
    Build an xsd complex element out of a O_OBJ, including its O_ATTR.
    '''
    cls = ET.Element('xs:element', name=o_obj.key_lett,
                     minOccurs='0', maxOccurs='unbounded')
    attributes = ET.SubElement(cls, 'xs:complexType')
    for o_attr in nav_many(o_obj).O_ATTR[102]():
        o_attr_ref = get_refered_attribute(o_attr)
        s_dt = nav_one(o_attr_ref).S_DT[114]()

        while nav_one(s_dt).S_UDT[17]():
            s_dt = nav_one(s_dt).S_UDT[17].S_DT[18]()

        type_name = get_type_name(s_dt)
        if type_name and not nav_one(o_attr).O_BATTR[106].O_DBATTR[107]():
            ET.SubElement(attributes, 'xs:attribute',
                          name=o_attr.name, type=type_name)
        else:
            logger.warning('Omitting %s.%s' % (o_obj.key_lett, o_attr.Name))
    return cls
python
{ "resource": "" }
q279168
build_component
test
def build_component(m, c_c):
    '''
    Build an xsd complex element out of a C_C, including its packaged
    S_DT and O_OBJ.
    '''
    component = ET.Element('xs:element', name=c_c.name)
    classes = ET.SubElement(component, 'xs:complexType')
    classes = ET.SubElement(classes, 'xs:sequence')

    scope_filter = lambda selected: ooaofooa.is_contained_in(selected, c_c)
    for o_obj in m.select_many('O_OBJ', scope_filter):
        cls = build_class(o_obj)
        classes.append(cls)

    return component
python
{ "resource": "" }
q279169
build_schema
test
def build_schema(m, c_c):
    '''
    Build an xsd schema from a bridgepoint component.
    '''
    schema = ET.Element('xs:schema')
    schema.set('xmlns:xs', 'http://www.w3.org/2001/XMLSchema')

    global_filter = lambda selected: ooaofooa.is_global(selected)
    for s_dt in m.select_many('S_DT', global_filter):
        datatype = build_type(s_dt)
        if datatype is not None:
            schema.append(datatype)

    scope_filter = lambda selected: ooaofooa.is_contained_in(selected, c_c)
    for s_dt in m.select_many('S_DT', scope_filter):
        datatype = build_type(s_dt)
        if datatype is not None:
            schema.append(datatype)

    component = build_component(m, c_c)
    schema.append(component)

    return schema
python
{ "resource": "" }
q279170
prettify
test
def prettify(xml_string):
    '''
    Indent an xml string with four spaces, and add an additional
    line break after each node.
    '''
    reparsed = xml.dom.minidom.parseString(xml_string)
    return reparsed.toprettyxml(indent="    ")
python
{ "resource": "" }
q279171
fetch_bikes
test
async def fetch_bikes() -> List[dict]:
    """
    Gets the full list of bikes from the bikeregister site. The data is
    hidden behind a form post request and so we need to extract an xsrf
    and session token with bs4.

    todo add pytest tests

    :return: All the currently registered bikes.
    :raise ApiError: When there was an error connecting to the API.
    """
    async with ClientSession() as session:
        try:
            async with session.get('https://www.bikeregister.com/stolen-bikes') as request:
                document = document_fromstring(await request.text())
        except ClientConnectionError as con_err:
            logger.debug(f"Could not connect to {con_err.host}")
            raise ApiError(f"Could not connect to {con_err.host}")

        token = document.xpath("//input[@name='_token']")
        if len(token) != 1:
            raise ApiError(f"Couldn't extract token from page.")
        else:
            token = token[0].value

        xsrf_token = request.cookies["XSRF-TOKEN"]
        laravel_session = request.cookies["laravel_session"]

        # get the bike data
        headers = {
            'cookie': f'XSRF-TOKEN={xsrf_token}; laravel_session={laravel_session}',
            'origin': 'https://www.bikeregister.com',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'accept': '*/*',
            'referer': 'https://www.bikeregister.com/stolen-bikes',
            'authority': 'www.bikeregister.com',
            'x-requested-with': 'XMLHttpRequest',
        }
        data = [
            ('_token', token),
            ('make', ''),
            ('model', ''),
            ('colour', ''),
            ('reporting_period', '1'),
        ]

        try:
            async with session.post('https://www.bikeregister.com/stolen-bikes',
                                    headers=headers, data=data) as request:
                bikes = json.loads(await request.text())
        except ClientConnectionError as con_err:
            logger.debug(f"Could not connect to {con_err.host}")
            raise ApiError(f"Could not connect to {con_err.host}")
        except json.JSONDecodeError as dec_err:
            logger.error(f"Could not decode data: {dec_err.msg}")
            raise ApiError(f"Could not decode data: {dec_err.msg}")

        return bikes
    # if cant open a session return []
python
{ "resource": "" }
q279172
set_positional_info
test
def set_positional_info(node, p):
    '''
    set positional information on a node
    '''
    node.position = Position()
    node.position.label = p.lexer.label
    node.position.start_stream = p.lexpos(1)
    node.position.start_line = p.lineno(1)
    node.position.start_column = find_column(p.lexer.lexdata,
                                             node.position.start_stream)
    _, node.position.end_stream = p.lexspan(len(p) - 1)
    _, node.position.end_line = p.linespan(len(p) - 1)
    node.position.end_column = find_column(p.lexer.lexdata,
                                           node.position.end_stream) - 1
    node.character_stream = p.lexer.lexdata[node.position.start_stream:
                                            node.position.end_stream]
python
{ "resource": "" }
q279173
track_production
test
def track_production(f):
    '''
    decorator for adding positional information to returning nodes
    '''
    @wraps(f)
    def wrapper(self, p):
        r = f(self, p)
        node = p[0]
        if isinstance(node, Node) and len(p) > 1:
            set_positional_info(node, p)
        return r
    return wrapper
python
{ "resource": "" }
q279174
OALParser.t_DOUBLEEQUAL
test
def t_DOUBLEEQUAL(self, t):
    r"\=\="
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279175
OALParser.t_NOTEQUAL
test
def t_NOTEQUAL(self, t):
    r"!\="
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279176
OALParser.t_ARROW
test
def t_ARROW(self, t):
    r"\-\>"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279177
OALParser.t_LE
test
def t_LE(self, t):
    r"\<\="
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279178
OALParser.t_GE
test
def t_GE(self, t):
    r"\>\="
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279179
OALParser.t_EQUAL
test
def t_EQUAL(self, t):
    r"\="
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279180
OALParser.t_DOT
test
def t_DOT(self, t):
    r"\."
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279181
OALParser.t_LSQBR
test
def t_LSQBR(self, t):
    r"\["
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279182
OALParser.t_RSQBR
test
def t_RSQBR(self, t):
    r"\]"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279183
OALParser.t_QMARK
test
def t_QMARK(self, t):
    r"\?"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279184
OALParser.t_LESSTHAN
test
def t_LESSTHAN(self, t):
    r"\<"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279185
OALParser.t_GT
test
def t_GT(self, t):
    r"\>"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279186
OALParser.t_PLUS
test
def t_PLUS(self, t):
    r"\+"
    t.endlexpos = t.lexpos + len(t.value)
    return t
python
{ "resource": "" }
q279187
RequestCmd.create_queue
test
def create_queue(self, name, strict=True, auto_delete=False,
                 auto_delete_timeout=0):
    """Create message content and properties to create queue with QMFv2

    :param name: Name of queue to create
    :type name: str
    :param strict: Whether command should fail when unrecognized
                   properties are provided. Not used by QMFv2.
                   Default: True
    :type strict: bool
    :param auto_delete: Whether queue should be auto deleted
                        Default: False
    :type auto_delete: bool
    :param auto_delete_timeout: Timeout in seconds for auto deleting
                                queue. Default: 0
    :type auto_delete_timeout: int
    :returns: Tuple containing content and method properties
    """
    content = {"_object_id": {"_object_name": self.object_name},
               "_method_name": "create",
               "_arguments": {"type": "queue",
                              "name": name,
                              "strict": strict,
                              "properties": {
                                  "auto-delete": auto_delete,
                                  "qpid.auto_delete_timeout": auto_delete_timeout}}}
    logger.debug("Message content -> {0}".format(content))
    return content, self.method_properties
python
{ "resource": "" }
q279188
RequestCmd.delete_queue
test
def delete_queue(self, name):
    """Create message content and properties to delete queue with QMFv2

    :param name: Name of queue to delete
    :type name: str
    :returns: Tuple containing content and method properties
    """
    content = {"_object_id": {"_object_name": self.object_name},
               "_method_name": "delete",
               "_arguments": {"type": "queue",
                              "name": name,
                              "options": dict()}}
    # "A nested map with the key options. This is presently unused."
    logger.debug("Message content -> {0}".format(content))
    return content, self.method_properties
python
{ "resource": "" }
q279189
RequestCmd.list_queues
test
def list_queues(self):
    """Create message content and properties to list all queues with QMFv2

    :returns: Tuple containing content and query properties
    """
    content = {"_what": "OBJECT",
               "_schema_id": {"_class_name": "queue"}}
    logger.debug("Message content -> {0}".format(content))
    return content, self.query_properties
python
{ "resource": "" }
q279190
RequestCmd.list_exchanges
test
def list_exchanges(self):
    """Create message content and properties to list all exchanges with QMFv2

    :returns: Tuple containing content and query properties
    """
    content = {"_what": "OBJECT",
               "_schema_id": {"_class_name": "exchange"}}
    logger.debug("Message content -> {0}".format(content))
    return content, self.query_properties
python
{ "resource": "" }
q279191
RequestCmd.purge_queue
test
def purge_queue(self, name):
    """Create message content and properties to purge queue with QMFv2

    :param name: Name of queue to purge
    :type name: str
    :returns: Tuple containing content and method properties
    """
    content = {"_object_id": {
                   "_object_name": "org.apache.qpid.broker:queue:{0}".format(name)},
               "_method_name": "purge",
               "_arguments": {"type": "queue",
                              "name": name,
                              "filter": dict()}}
    logger.debug("Message content -> {0}".format(content))
    return content, self.method_properties
python
{ "resource": "" }
q279192
Gmailer._create_msg
test
def _create_msg(self, to, subject, msgHtml, msgPlain, attachments=None):
    '''
    attachments should be a list of paths
    '''
    sender = self.sender
    if attachments and isinstance(attachments, str):
        attachments = [attachments]
    else:
        attachments = list(attachments or [])

    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = to
    msg.attach(MIMEText(msgPlain, 'plain'))
    msg.attach(MIMEText(msgHtml, 'html'))

    # append attachments if any
    for path in attachments:
        _attachment = self._prep_attachment(path)
        msg.attach(_attachment)

    raw = base64.urlsafe_b64encode(msg.as_bytes()).decode()
    #raw = raw.decode()
    body = {'raw': raw}
    return body
python
{ "resource": "" }
q279193
OCR.read
test
def read(self):
    """
    Returns the text from an image at a given url.
    """
    # Only download the image if it has changed
    if self.connection.has_changed():
        image_path = self.connection.download_image()
        image = Image.open(image_path)
        self.text_cache = pytesseract.image_to_string(image)
        image.close()
    return self.text_cache
python
{ "resource": "" }
q279194
OCR.text_visible
test
def text_visible(self):
    """
    Returns true or false based on if the OCR process has read actual
    words. This is needed to prevent non-words from being added to the
    queue since the OCR process can sometimes return values that are
    not meaningful.
    """
    # Split the input string at points with any amount of whitespace
    words = self.read().split()

    # Light weight check to see if a word exists
    for word in words:
        # If the word is a numeric value
        if word.lstrip('-').replace('.', '', 1).isdigit():
            return True
        # If the word contains only letters with a length from 2 to 20
        if word.isalpha() and 1 < len(word) <= 20:
            return True
    return False
python
{ "resource": "" }
q279195
main
test
def main():
    '''
    Parse command line options and launch the interpreter
    '''
    parser = optparse.OptionParser(
        usage="%prog [options] <model_path> [another_model_path..]",
        version=xtuml.version.complete_string,
        formatter=optparse.TitledHelpFormatter())

    parser.add_option("-v", "--verbosity", dest='verbosity',
                      action="count", default=1,
                      help="increase debug logging level")
    parser.add_option("-f", "--function", dest='function', action="store",
                      help="invoke function named NAME", metavar='NAME')
    parser.add_option("-c", "--component", dest='component', action="store",
                      help="look for the function in a component named NAME",
                      metavar='NAME', default=None)

    (opts, args) = parser.parse_args()
    if len(args) == 0 or not opts.function:
        parser.print_help()
        sys.exit(1)

    levels = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))

    from bridgepoint import ooaofooa
    mm = ooaofooa.load_metamodel(args)

    c_c = mm.select_any('C_C', where(Name=opts.component))
    domain = ooaofooa.mk_component(mm, c_c, derived_attributes=False)

    func = domain.find_symbol(opts.function)
    return func()
python
{ "resource": "" }
q279196
serialize_value
test
def serialize_value(value, ty):
    '''
    Serialize a value from an xtuml metamodel instance.
    '''
    ty = ty.upper()

    null_value = {
        'BOOLEAN': False,
        'INTEGER': 0,
        'REAL': 0.0,
        'STRING': '',
        'UNIQUE_ID': 0
    }

    transfer_fn = {
        'BOOLEAN': lambda v: '%d' % int(v),
        'INTEGER': lambda v: '%d' % v,
        'REAL': lambda v: '%f' % v,
        'STRING': lambda v: "'%s'" % v.replace("'", "''"),
        'UNIQUE_ID': lambda v: '"%s"' % uuid.UUID(int=v)
    }

    if value is None:
        value = null_value[ty]

    return transfer_fn[ty](value)
python
{ "resource": "" }
q279197
serialize_association
test
def serialize_association(ass):
    '''
    Serialize an xtuml metamodel association.
    '''
    s1 = '%s %s (%s)' % (ass.source_link.cardinality,
                         ass.source_link.to_metaclass.kind,
                         ', '.join(ass.source_keys))

    if ass.target_link.phrase:
        s1 += " PHRASE '%s'" % ass.target_link.phrase

    s2 = '%s %s (%s)' % (ass.target_link.cardinality,
                         ass.target_link.to_metaclass.kind,
                         ', '.join(ass.target_keys))

    if ass.source_link.phrase:
        s2 += " PHRASE '%s'" % ass.source_link.phrase

    return 'CREATE ROP REF_ID %s FROM %s TO %s;\n' % (ass.rel_id, s1, s2)
python
{ "resource": "" }
q279198
serialize_class
test
def serialize_class(Cls):
    '''
    Serialize an xtUML metamodel class.
    '''
    metaclass = xtuml.get_metaclass(Cls)
    attributes = ['%s %s' % (name, ty.upper())
                  for name, ty in metaclass.attributes]

    s = 'CREATE TABLE %s (\n    ' % metaclass.kind
    s += ',\n    '.join(attributes)
    s += '\n);\n'

    return s
python
{ "resource": "" }
q279199
main
test
def main(): """Function for command line execution""" parser = ArgumentParser(description="search files using n-grams") parser.add_argument('--path', dest='path', help="where to search", nargs=1, action="store", default=getcwd()) parser.add_argument('--update', dest='update', help="update the index", action='store_true', default=True) parser.add_argument('--filetype', dest='filetype', help="any, images, documents, code, audio, video", nargs=1, action="store", default=["any"]) parser.add_argument('--verbose', dest='verbose', help="extended output", action='store_true', default=False) parser.add_argument('--results', dest='results', help="number of results to display", action="store", default=10) parser.add_argument('query', nargs='+', help="what to search", action="store") args = parser.parse_args() if args.verbose: verbose = 2 pprint(args) else: verbose = 0 query = args.query[0] for arg in args.query[1:]: query = query + " " + arg slb = min([len(w) for w in query.split(" ")]) files = Files(path=args.path, filetype=args.filetype[0], exclude=[], update=args.update, verbose=verbose) index = Index(files, slb=slb, verbose=verbose) results = index.search(query, verbose=verbose) Handler(results, results_number=int(args.results))
python
{ "resource": "" }