| before (stringlengths 0-955k) | after (stringlengths 0-877k) | repo (stringlengths 1-74) | type (stringclasses, 1 value) |
|---|---|---|---|
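Each row pairs a function whose inlined helper code is wrapped in `<DeepExtract>` markers (`before`) with the same function after the markers are stripped (`after`), plus the source `repo` and a `type` label that only takes the value `positive`. A minimal sketch of iterating rows with the Hugging Face `datasets` library follows; the dataset path `user/deep-extract-pairs` is a hypothetical placeholder, not the real identifier.

```python
# Minimal sketch: load and inspect a few before/after rows.
# Assumes the `datasets` library is installed; the dataset path below is a
# hypothetical placeholder for this dump, not a published identifier.
from datasets import load_dataset

ds = load_dataset("user/deep-extract-pairs", split="train")  # placeholder path

for row in ds.select(range(3)):
    # `before` carries the <DeepExtract> markers; `after` is the cleaned version.
    n_spans = row["before"].count("<DeepExtract>")
    print(f"{row['repo']}: {n_spans} extracted span(s), label={row['type']}")
```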
def visit_schema(self, output: Optional[str]=None, **_):
base = URIRef(self.schema.id)
self.graph = Graph(identifier=base)
for prefix in self.metamodel.schema.emit_prefixes:
self.graph.bind(prefix, self.metamodel.namespaces[prefix])
self.graph.add((base, RDF.type, OWL.Ontology))
<DeepExtract>
for (k, v) in self.schema.__dict__.items():
if k in self.metamodel.schema.slots:
defining_slot = self.metamodel.schema.slots[k]
if v is not None and 'owl' in defining_slot.in_subset:
ve = v if isinstance(v, list) else [v]
for e in ve:
self.graph.add((base, URIRef(self.metamodel.namespaces.uri_for(defining_slot.slot_uri)), Literal(e)))
</DeepExtract>
for name in ['class_definition', 'type_definition', 'slot_definition', 'subset_definition']:
<DeepExtract>
metac = self.metamodel.schema.classes[name]
metac_uri = self.metamodel.namespaces[METAMODEL_NAMESPACE_NAME][camelcase(metac.name)]
self.graph.add((metac_uri, RDF.type, OWL.Class))
self._add_element_properties(metac_uri, metac)
</DeepExtract>
self.top_value_uri = self.metamodel.namespaces[METAMODEL_NAMESPACE_NAME]['topValue']
self.graph.add((self.top_value_uri, RDF.type, OWL.DatatypeProperty))
self.graph.add((self.top_value_uri, RDFS.label, Literal('value')))
|
def visit_schema(self, output: Optional[str]=None, **_):
base = URIRef(self.schema.id)
self.graph = Graph(identifier=base)
for prefix in self.metamodel.schema.emit_prefixes:
self.graph.bind(prefix, self.metamodel.namespaces[prefix])
self.graph.add((base, RDF.type, OWL.Ontology))
for (k, v) in self.schema.__dict__.items():
if k in self.metamodel.schema.slots:
defining_slot = self.metamodel.schema.slots[k]
if v is not None and 'owl' in defining_slot.in_subset:
ve = v if isinstance(v, list) else [v]
for e in ve:
self.graph.add((base, URIRef(self.metamodel.namespaces.uri_for(defining_slot.slot_uri)), Literal(e)))
for name in ['class_definition', 'type_definition', 'slot_definition', 'subset_definition']:
metac = self.metamodel.schema.classes[name]
metac_uri = self.metamodel.namespaces[METAMODEL_NAMESPACE_NAME][camelcase(metac.name)]
self.graph.add((metac_uri, RDF.type, OWL.Class))
self._add_element_properties(metac_uri, metac)
self.top_value_uri = self.metamodel.namespaces[METAMODEL_NAMESPACE_NAME]['topValue']
self.graph.add((self.top_value_uri, RDF.type, OWL.DatatypeProperty))
self.graph.add((self.top_value_uri, RDFS.label, Literal('value')))
|
biolinkml
|
positive
|
def get_evaluation(self, sess, batch):
<DeepExtract>
(idxs, data_set) = batch
</DeepExtract>
assert isinstance(data_set, DataSet)
<DeepExtract>
feed_dict = self.model.get_feed_dict(batch[1], False)
</DeepExtract>
(global_step, yp, yp2, loss, vals) = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
y = data_set.data['y']
if self.config.squash:
new_y = []
for (xi, yi) in zip(data_set.data['x'], y):
new_yi = []
for (start, stop) in yi:
start_offset = sum(map(len, xi[:start[0]]))
stop_offset = sum(map(len, xi[:stop[0]]))
new_start = (0, start_offset + start[1])
new_stop = (0, stop_offset + stop[1])
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
if self.config.single:
new_y = []
for yi in y:
new_yi = []
for (start, stop) in yi:
new_start = (0, start[1])
new_stop = (0, stop[1])
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
(yp, yp2) = (yp[:data_set.num_examples], yp2[:data_set.num_examples])
(spans, scores) = zip(*[get_best_span(ypi, yp2i) for (ypi, yp2i) in zip(yp, yp2)])
def _get(xi, span):
if len(xi) <= span[0][0]:
return ['']
if len(xi[span[0][0]]) <= span[1][1]:
return ['']
return xi[span[0][0]][span[0][1]:span[1][1]]
def _get2(context, xi, span):
if len(xi) <= span[0][0]:
return ''
if len(xi[span[0][0]]) <= span[1][1]:
return ''
return get_phrase(context, xi, span)
id2answer_dict = {id_: _get2(context, xi, span) for (id_, xi, span, context) in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
id2score_dict = {id_: score for (id_, score) in zip(data_set.data['ids'], scores)}
id2answer_dict['scores'] = id2score_dict
correct = [self.__class__.compare2(yi, span) for (yi, span) in zip(y, spans)]
f1s = [self.__class__.span_f1(yi, span) for (yi, span) in zip(y, spans)]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y, correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
return e
|
def get_evaluation(self, sess, batch):
(idxs, data_set) = batch
assert isinstance(data_set, DataSet)
feed_dict = self.model.get_feed_dict(batch[1], False)
(global_step, yp, yp2, loss, vals) = sess.run([self.global_step, self.yp, self.yp2, self.loss, list(self.tensor_dict.values())], feed_dict=feed_dict)
y = data_set.data['y']
if self.config.squash:
new_y = []
for (xi, yi) in zip(data_set.data['x'], y):
new_yi = []
for (start, stop) in yi:
start_offset = sum(map(len, xi[:start[0]]))
stop_offset = sum(map(len, xi[:stop[0]]))
new_start = (0, start_offset + start[1])
new_stop = (0, stop_offset + stop[1])
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
if self.config.single:
new_y = []
for yi in y:
new_yi = []
for (start, stop) in yi:
new_start = (0, start[1])
new_stop = (0, stop[1])
new_yi.append((new_start, new_stop))
new_y.append(new_yi)
y = new_y
(yp, yp2) = (yp[:data_set.num_examples], yp2[:data_set.num_examples])
(spans, scores) = zip(*[get_best_span(ypi, yp2i) for (ypi, yp2i) in zip(yp, yp2)])
def _get(xi, span):
if len(xi) <= span[0][0]:
return ['']
if len(xi[span[0][0]]) <= span[1][1]:
return ['']
return xi[span[0][0]][span[0][1]:span[1][1]]
def _get2(context, xi, span):
if len(xi) <= span[0][0]:
return ''
if len(xi[span[0][0]]) <= span[1][1]:
return ''
return get_phrase(context, xi, span)
id2answer_dict = {id_: _get2(context, xi, span) for (id_, xi, span, context) in zip(data_set.data['ids'], data_set.data['x'], spans, data_set.data['p'])}
id2score_dict = {id_: score for (id_, score) in zip(data_set.data['ids'], scores)}
id2answer_dict['scores'] = id2score_dict
correct = [self.__class__.compare2(yi, span) for (yi, span) in zip(y, spans)]
f1s = [self.__class__.span_f1(yi, span) for (yi, span) in zip(y, spans)]
tensor_dict = dict(zip(self.tensor_dict.keys(), vals))
e = F1Evaluation(data_set.data_type, int(global_step), idxs, yp.tolist(), yp2.tolist(), y, correct, float(loss), f1s, id2answer_dict, tensor_dict=tensor_dict)
return e
|
adversarial-squad
|
positive
|
def result(self):
class_info = {}
origin_counter = Counter([self.id2label[x[0]] for x in self.origins])
found_counter = Counter([self.id2label[x[0]] for x in self.founds])
right_counter = Counter([self.id2label[x[0]] for x in self.rights])
for (type_, count) in origin_counter.items():
origin = count
found = found_counter.get(type_, 0)
right = right_counter.get(type_, 0)
<DeepExtract>
recall = 0 if origin == 0 else right / origin
precision = 0 if found == 0 else right / found
f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
(recall, precision, f1) = (recall, precision, f1)
</DeepExtract>
class_info[type_] = {'acc': round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
origin = len(self.origins)
found = len(self.founds)
right = len(self.rights)
<DeepExtract>
recall = 0 if origin == 0 else right / origin
precision = 0 if found == 0 else right / found
f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
(recall, precision, f1) = (recall, precision, f1)
</DeepExtract>
return ({'acc': precision, 'recall': recall, 'f1': f1}, class_info)
|
def result(self):
class_info = {}
origin_counter = Counter([self.id2label[x[0]] for x in self.origins])
found_counter = Counter([self.id2label[x[0]] for x in self.founds])
right_counter = Counter([self.id2label[x[0]] for x in self.rights])
for (type_, count) in origin_counter.items():
origin = count
found = found_counter.get(type_, 0)
right = right_counter.get(type_, 0)
recall = 0 if origin == 0 else right / origin
precision = 0 if found == 0 else right / found
f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
(recall, precision, f1) = (recall, precision, f1)
class_info[type_] = {'acc': round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
origin = len(self.origins)
found = len(self.founds)
right = len(self.rights)
recall = 0 if origin == 0 else right / origin
precision = 0 if found == 0 else right / found
f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
(recall, precision, f1) = (recall, precision, f1)
return ({'acc': precision, 'recall': recall, 'f1': f1}, class_info)
|
BERT-Attribute-Value-Extract
|
positive
|
def run_eval(self, results, save_dir):
<DeepExtract>
json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w'))
</DeepExtract>
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
|
def run_eval(self, results, save_dir):
json.dump(self.convert_eval_format(results), open('{}/results.json'.format(save_dir), 'w'))
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
|
DANR
|
positive
|
def api(self, command):
"""call remote API"""
try:
result = self.api_session.get(self.url + command, headers=self.headers, timeout=self.timeout, proxies=self.proxy)
result.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
<DeepExtract>
try:
if result.json()['error']:
raise APIError(result.json())
except TypeError:
pass
</DeepExtract>
return result.json()
|
def api(self, command):
"""call remote API"""
try:
result = self.api_session.get(self.url + command, headers=self.headers, timeout=self.timeout, proxies=self.proxy)
result.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
try:
if result.json()['error']:
raise APIError(result.json())
except TypeError:
pass
return result.json()
|
cryptotik
|
positive
|
def register_hook(self, synchronize=True):
"""
Decorator to register hook functions for this message processor.
Args:
synchronize (bool): Enables thread synchronization for entire hook function.
"""
def decorator(func):
ret_func = func
if synchronize:
<DeepExtract>
def synchronizer(*args, **kwargs):
with self._hook_lock:
ret_func = func(*args, **kwargs)
ret_func = synchronizer
</DeepExtract>
name = func.__name__
self._hook_funcs[name] = ret_func
if DEBUG:
log.debug("Registered hook function '%s'", name)
return ret_func
return decorator
|
def register_hook(self, synchronize=True):
"""
Decorator to register hook functions for this message processor.
Args:
synchronize (bool): Enables thread synchronization for entire hook function.
"""
def decorator(func):
ret_func = func
if synchronize:
def synchronizer(*args, **kwargs):
with self._hook_lock:
ret_func = func(*args, **kwargs)
ret_func = synchronizer
name = func.__name__
self._hook_funcs[name] = ret_func
if DEBUG:
log.debug("Registered hook function '%s'", name)
return ret_func
return decorator
|
autopi-core
|
positive
|
def set_selection_range(self, begin, end):
if begin > end:
tmp = begin
begin = end
end = tmp
<DeepExtract>
ofs = begin - self.data.start()
if ofs < 0:
return False
if ofs > len(self.data):
return False
self.cursorY = int(ofs / self.cols)
self.cursorX = (ofs - self.cursorY * self.cols) * 2
self.deselect()
self.repositionCaret()
return True
</DeepExtract>
<DeepExtract>
begin = self.cursorY * self.cols + int(self.cursorX / 2)
</DeepExtract>
self.selectionStartX = self.cursorX
self.selectionStartY = self.cursorY
ofs = end - self.data.start()
if end < begin:
return
if end > len(self.data):
end = len(self.data)
self.cursorY = int(ofs / self.cols)
self.cursorX = (ofs - self.cursorY * self.cols) * 2
self.viewport().update()
|
def set_selection_range(self, begin, end):
if begin > end:
tmp = begin
begin = end
end = tmp
ofs = begin - self.data.start()
if ofs < 0:
return False
if ofs > len(self.data):
return False
self.cursorY = int(ofs / self.cols)
self.cursorX = (ofs - self.cursorY * self.cols) * 2
self.deselect()
self.repositionCaret()
return True
begin = self.cursorY * self.cols + int(self.cursorX / 2)
self.selectionStartX = self.cursorX
self.selectionStartY = self.cursorY
ofs = end - self.data.start()
if end < begin:
return
if end > len(self.data):
end = len(self.data)
self.cursorY = int(ofs / self.cols)
self.cursorX = (ofs - self.cursorY * self.cols) * 2
self.viewport().update()
|
deprecated-binaryninja-python
|
positive
|
def make_ready(self, dc, *args, **kwargs):
self.products = []
self.low_res_products = []
for (i, prod_name) in enumerate(self.product_names):
if self.low_res_product_names:
low_res_prod_name = self.low_res_product_names[i]
else:
low_res_prod_name = None
product = dc.index.products.get_by_name(prod_name)
if not product:
raise ConfigException(f'Could not find product {prod_name} in datacube for layer {self.name}')
self.products.append(product)
if low_res_prod_name:
product = dc.index.products.get_by_name(low_res_prod_name)
if not product:
raise ConfigException(f'Could not find product {low_res_prod_name} in datacube for layer {self.name}')
self.low_res_products.append(product)
self.product = self.products[0]
self.definition = self.product.definition
<DeepExtract>
if dc:
dc = dc
else:
dc = get_cube()
self.hide = False
self._ranges = None
try:
from datacube_ows.product_ranges import get_ranges
self._ranges = get_ranges(dc, self)
if self._ranges is None:
raise Exception('Null product range')
self.bboxes = self.extract_bboxes()
if self.default_time_rule == DEF_TIME_EARLIEST:
self.default_time = self._ranges['start_time']
elif isinstance(self.default_time_rule, datetime.date) and self.default_time_rule in self._ranges['time_set']:
self.default_time = self.default_time_rule
elif isinstance(self.default_time_rule, datetime.date):
_LOG.warning('default_time for named_layer %s is explicit date (%s) that is not available for the layer. Using most recent available date instead.', self.name, self.default_time_rule.isoformat())
self.default_time = self._ranges['end_time']
else:
self.default_time = self._ranges['end_time']
except Exception as a:
if not self.global_cfg.called_from_update_ranges:
_LOG.warning('get_ranges failed for layer %s: %s', self.name, str(a))
self.hide = True
self.bboxes = {}
</DeepExtract>
self.band_idx.make_ready(dc)
self.resource_limits.make_ready(dc)
self.all_flag_band_names = set()
for fb in self.flag_bands.values():
fb.make_ready(dc)
if fb.pq_band in self.all_flag_band_names:
raise ConfigException(f'Duplicate flag band name: {fb.pq_band}')
self.all_flag_band_names.add(fb.pq_band)
<DeepExtract>
self.always_fetch_bands = list([self.band_idx.band(b) for b in self.raw_afb])
</DeepExtract>
<DeepExtract>
try:
self.native_CRS = self.product.definition['storage']['crs']
if self.cfg_native_crs == self.native_CRS:
_LOG.debug('Native crs for layer %s is specified in ODC metadata and does not need to be specified in configuration', self.name)
else:
_LOG.warning('Native crs for layer %s is specified in config as %s - overridden to %s by ODC metadata', self.name, self.cfg_native_crs, self.native_CRS)
except KeyError:
self.native_CRS = self.cfg_native_crs
if not self.native_CRS:
raise ConfigException(f'No native CRS could be found for layer {self.name}')
if self.native_CRS not in self.global_cfg.published_CRSs:
raise ConfigException(f'Native CRS for product {self.product_name} in layer {self.name} ({self.native_CRS}) not in published CRSs')
self.native_CRS_def = self.global_cfg.published_CRSs[self.native_CRS]
try:
self.resolution_x = self.product.definition['storage']['resolution'][self.native_CRS_def['horizontal_coord']]
self.resolution_y = self.product.definition['storage']['resolution'][self.native_CRS_def['vertical_coord']]
except KeyError:
self.resolution_x = None
self.resolution_y = None
if self.resolution_x is None:
try:
if self.cfg_native_resolution is None:
raise KeyError
(self.resolution_x, self.resolution_y) = self.cfg_native_resolution
except KeyError:
raise ConfigException(f'No native resolution supplied for layer {self.name} with no product-native resolution defined in ODC.')
except ValueError:
raise ConfigException(f'Invalid native resolution supplied for layer {self.name}')
except TypeError:
raise ConfigException(f'Invalid native resolution supplied for layer {self.name}')
elif self.cfg_native_resolution:
(config_x, config_y) = (float(r) for r in self.cfg_native_resolution)
if math.isclose(config_x, float(self.resolution_x), rel_tol=1e-08) and math.isclose(config_y, float(self.resolution_y), rel_tol=1e-08):
_LOG.debug('Native resolution for layer %s is specified in ODC metadata and does not need to be specified in configuration', self.name)
else:
_LOG.warning('Native resolution for layer %s is specified in config as %s - overridden to (%.15f, %.15f) by ODC metadata', self.name, repr(self.cfg_native_resolution), self.resolution_x, self.resolution_y)
</DeepExtract>
if self.global_cfg.wcs:
<DeepExtract>
if self.global_cfg.wcs and self.wcs:
try:
native_bounding_box = self.bboxes[self.native_CRS]
except KeyError:
if not self.global_cfg.called_from_update_ranges:
_LOG.warning('Layer: %s No bounding box in ranges for native CRS %s - rerun update_ranges.py', self.name, self.native_CRS)
self.hide = True
return
self.origin_x = native_bounding_box['left']
self.origin_y = native_bounding_box['bottom']
if native_bounding_box['right'] - native_bounding_box['left'] < self.resolution_x:
ConfigException('Native (%s) bounding box on layer %s has left %.8f, right %.8f (diff %d), but horizontal resolution is %.8f' % (self.native_CRS, self.name, native_bounding_box['left'], native_bounding_box['right'], native_bounding_box['right'] - native_bounding_box['left'], self.resolution_x))
if native_bounding_box['top'] - native_bounding_box['bottom'] < self.resolution_x:
ConfigException('Native (%s) bounding box on layer %s has bottom %f, top %f (diff %d), but vertical resolution is %f' % (self.native_CRS, self.name, native_bounding_box['bottom'], native_bounding_box['top'], native_bounding_box['top'] - native_bounding_box['bottom'], self.resolution_y))
self.grid_high_x = abs(int((native_bounding_box['right'] - native_bounding_box['left']) / self.resolution_x))
self.grid_high_y = int((native_bounding_box['bottom'] - native_bounding_box['top']) / self.resolution_y)
if self.grid_high_x <= 0:
err_str = f"Grid High x is non-positive on layer {self.name}: native ({self.native_CRS}) extent: {native_bounding_box['left']},{native_bounding_box['right']}: x_res={self.resolution_x}"
raise ConfigException(err_str)
if self.grid_high_y <= 0:
err_str = f"Grid High y is non-positive on layer {self.name}: native ({self.native_CRS}) extent: {native_bounding_box['bottom']},{native_bounding_box['top']}: y_res={self.resolution_y}"
raise ConfigException(err_str)
self.grids = {}
for (crs, crs_def) in self.global_cfg.published_CRSs.items():
if crs == self.native_CRS:
self.grids[crs] = {'origin': (self.origin_x, self.origin_y), 'resolution': (self.resolution_x, self.resolution_y)}
else:
try:
bbox = self.bboxes[crs]
except KeyError:
continue
self.grids[crs] = {'origin': (bbox['left'], bbox['bottom']), 'resolution': ((bbox['right'] - bbox['left']) / self.grid_high_x, (bbox['top'] - bbox['bottom']) / self.grid_high_y)}
</DeepExtract>
for style in self.styles:
style.make_ready(dc, *args, **kwargs)
for fpb in self.allflag_productbands:
fpb.make_ready(dc, *args, **kwargs)
if not self.multi_product:
self.global_cfg.native_product_index[self.product_name] = self
if not self.hide:
super().make_ready(dc, *args, **kwargs)
|
def make_ready(self, dc, *args, **kwargs):
self.products = []
self.low_res_products = []
for (i, prod_name) in enumerate(self.product_names):
if self.low_res_product_names:
low_res_prod_name = self.low_res_product_names[i]
else:
low_res_prod_name = None
product = dc.index.products.get_by_name(prod_name)
if not product:
raise ConfigException(f'Could not find product {prod_name} in datacube for layer {self.name}')
self.products.append(product)
if low_res_prod_name:
product = dc.index.products.get_by_name(low_res_prod_name)
if not product:
raise ConfigException(f'Could not find product {low_res_prod_name} in datacube for layer {self.name}')
self.low_res_products.append(product)
self.product = self.products[0]
self.definition = self.product.definition
if dc:
dc = dc
else:
dc = get_cube()
self.hide = False
self._ranges = None
try:
from datacube_ows.product_ranges import get_ranges
self._ranges = get_ranges(dc, self)
if self._ranges is None:
raise Exception('Null product range')
self.bboxes = self.extract_bboxes()
if self.default_time_rule == DEF_TIME_EARLIEST:
self.default_time = self._ranges['start_time']
elif isinstance(self.default_time_rule, datetime.date) and self.default_time_rule in self._ranges['time_set']:
self.default_time = self.default_time_rule
elif isinstance(self.default_time_rule, datetime.date):
_LOG.warning('default_time for named_layer %s is explicit date (%s) that is not available for the layer. Using most recent available date instead.', self.name, self.default_time_rule.isoformat())
self.default_time = self._ranges['end_time']
else:
self.default_time = self._ranges['end_time']
except Exception as a:
if not self.global_cfg.called_from_update_ranges:
_LOG.warning('get_ranges failed for layer %s: %s', self.name, str(a))
self.hide = True
self.bboxes = {}
self.band_idx.make_ready(dc)
self.resource_limits.make_ready(dc)
self.all_flag_band_names = set()
for fb in self.flag_bands.values():
fb.make_ready(dc)
if fb.pq_band in self.all_flag_band_names:
raise ConfigException(f'Duplicate flag band name: {fb.pq_band}')
self.all_flag_band_names.add(fb.pq_band)
self.always_fetch_bands = list([self.band_idx.band(b) for b in self.raw_afb])
try:
self.native_CRS = self.product.definition['storage']['crs']
if self.cfg_native_crs == self.native_CRS:
_LOG.debug('Native crs for layer %s is specified in ODC metadata and does not need to be specified in configuration', self.name)
else:
_LOG.warning('Native crs for layer %s is specified in config as %s - overridden to %s by ODC metadata', self.name, self.cfg_native_crs, self.native_CRS)
except KeyError:
self.native_CRS = self.cfg_native_crs
if not self.native_CRS:
raise ConfigException(f'No native CRS could be found for layer {self.name}')
if self.native_CRS not in self.global_cfg.published_CRSs:
raise ConfigException(f'Native CRS for product {self.product_name} in layer {self.name} ({self.native_CRS}) not in published CRSs')
self.native_CRS_def = self.global_cfg.published_CRSs[self.native_CRS]
try:
self.resolution_x = self.product.definition['storage']['resolution'][self.native_CRS_def['horizontal_coord']]
self.resolution_y = self.product.definition['storage']['resolution'][self.native_CRS_def['vertical_coord']]
except KeyError:
self.resolution_x = None
self.resolution_y = None
if self.resolution_x is None:
try:
if self.cfg_native_resolution is None:
raise KeyError
(self.resolution_x, self.resolution_y) = self.cfg_native_resolution
except KeyError:
raise ConfigException(f'No native resolution supplied for layer {self.name} with no product-native resolution defined in ODC.')
except ValueError:
raise ConfigException(f'Invalid native resolution supplied for layer {self.name}')
except TypeError:
raise ConfigException(f'Invalid native resolution supplied for layer {self.name}')
elif self.cfg_native_resolution:
(config_x, config_y) = (float(r) for r in self.cfg_native_resolution)
if math.isclose(config_x, float(self.resolution_x), rel_tol=1e-08) and math.isclose(config_y, float(self.resolution_y), rel_tol=1e-08):
_LOG.debug('Native resolution for layer %s is specified in ODC metadata and does not need to be specified in configuration', self.name)
else:
_LOG.warning('Native resolution for layer %s is specified in config as %s - overridden to (%.15f, %.15f) by ODC metadata', self.name, repr(self.cfg_native_resolution), self.resolution_x, self.resolution_y)
if self.global_cfg.wcs:
if self.global_cfg.wcs and self.wcs:
try:
native_bounding_box = self.bboxes[self.native_CRS]
except KeyError:
if not self.global_cfg.called_from_update_ranges:
_LOG.warning('Layer: %s No bounding box in ranges for native CRS %s - rerun update_ranges.py', self.name, self.native_CRS)
self.hide = True
return
self.origin_x = native_bounding_box['left']
self.origin_y = native_bounding_box['bottom']
if native_bounding_box['right'] - native_bounding_box['left'] < self.resolution_x:
ConfigException('Native (%s) bounding box on layer %s has left %.8f, right %.8f (diff %d), but horizontal resolution is %.8f' % (self.native_CRS, self.name, native_bounding_box['left'], native_bounding_box['right'], native_bounding_box['right'] - native_bounding_box['left'], self.resolution_x))
if native_bounding_box['top'] - native_bounding_box['bottom'] < self.resolution_x:
ConfigException('Native (%s) bounding box on layer %s has bottom %f, top %f (diff %d), but vertical resolution is %f' % (self.native_CRS, self.name, native_bounding_box['bottom'], native_bounding_box['top'], native_bounding_box['top'] - native_bounding_box['bottom'], self.resolution_y))
self.grid_high_x = abs(int((native_bounding_box['right'] - native_bounding_box['left']) / self.resolution_x))
self.grid_high_y = int((native_bounding_box['bottom'] - native_bounding_box['top']) / self.resolution_y)
if self.grid_high_x <= 0:
err_str = f"Grid High x is non-positive on layer {self.name}: native ({self.native_CRS}) extent: {native_bounding_box['left']},{native_bounding_box['right']}: x_res={self.resolution_x}"
raise ConfigException(err_str)
if self.grid_high_y <= 0:
err_str = f"Grid High y is non-positive on layer {self.name}: native ({self.native_CRS}) extent: {native_bounding_box['bottom']},{native_bounding_box['top']}: y_res={self.resolution_y}"
raise ConfigException(err_str)
self.grids = {}
for (crs, crs_def) in self.global_cfg.published_CRSs.items():
if crs == self.native_CRS:
self.grids[crs] = {'origin': (self.origin_x, self.origin_y), 'resolution': (self.resolution_x, self.resolution_y)}
else:
try:
bbox = self.bboxes[crs]
except KeyError:
continue
self.grids[crs] = {'origin': (bbox['left'], bbox['bottom']), 'resolution': ((bbox['right'] - bbox['left']) / self.grid_high_x, (bbox['top'] - bbox['bottom']) / self.grid_high_y)}
for style in self.styles:
style.make_ready(dc, *args, **kwargs)
for fpb in self.allflag_productbands:
fpb.make_ready(dc, *args, **kwargs)
if not self.multi_product:
self.global_cfg.native_product_index[self.product_name] = self
if not self.hide:
super().make_ready(dc, *args, **kwargs)
|
datacube-ows
|
positive
|
def set_img_size(self, img_size: Tuple[int, int]) -> None:
"""Sets the image shape, and calls the methods that need this information
for running."""
<DeepExtract>
if 1 <= self._verbose:
print(f"[ GPUCorrel level {1} debug] {f'Setting master resolution: {img_size},'}")
</DeepExtract>
if self._context is not None:
GPUCorrel.context = self._context
else:
pycuda.driver.init()
GPUCorrel.context = pycuda.tools.make_default_context()
src_txt = '\n texture<float, cudaTextureType2D, cudaReadModeElementType> texFx{0};\n texture<float, cudaTextureType2D, cudaReadModeElementType> texFy{0};\n __global__ void resample{0}(float* outX, float* outY, int x, int y)\n {{\n int idx = blockIdx.x*blockDim.x+threadIdx.x;\n int idy = blockIdx.y*blockDim.y+threadIdx.y;\n if(idx < x && idy < y)\n {{\n outX[idy*x+idx] = tex2D(texFx{0},(float)idx/x, (float)idy/y);\n outY[idy*x+idx] = tex2D(texFy{0},(float)idx/x, (float)idy/y);\n }}\n }}\n '
src = ''.join([src_txt.format(i) for i in range(self._n_fields)])
source_module = pycuda.compiler.SourceModule(src)
self._tex_fx = [source_module.get_texref(f'texFx{i}') for i in range(self._n_fields)]
self._tex_fy = [source_module.get_texref(f'texFy{i}') for i in range(self._n_fields)]
self._resample = [source_module.get_function(f'resample{i}') for i in range(self._n_fields)]
for (tex_fx, tex_fy, resample) in zip(self._tex_fx, self._tex_fy, self._resample):
resample.prepare('PPii', texrefs=[tex_fx, tex_fy])
for tex in chain(self._tex_fx, self._tex_fy):
tex.set_flags(pycuda.driver.TRSF_NORMALIZED_COORDINATES)
tex.set_filter_mode(pycuda.driver.filter_mode.LINEAR)
tex.set_address_mode(0, pycuda.driver.address_mode.BORDER)
tex.set_address_mode(1, pycuda.driver.address_mode.BORDER)
(height, width, *_) = img_size
self._heights = [round(height / self._resampling_factor ** i) for i in range(self._levels)]
self._widths = [round(width / self._resampling_factor ** i) for i in range(self._levels)]
self._stages = [CorrelStage(img_size=(height, width), verbose=self._verbose, n_fields=self._n_fields, iterations=self._iterations, mul=self._mul, kernel_file=self._kernel_file) for (i, (height, width)) in enumerate(zip(self._heights, self._widths))]
if self._ref_img is not None:
<DeepExtract>
if isinstance(self._ref_img, np.ndarray):
self._debug(3, 'Setting original image from ndarray')
self.dev_orig.set(self._ref_img)
elif isinstance(self._ref_img, gpuarray.GPUArray):
self._debug(3, 'Setting original image from GPUArray')
self.dev_orig = self._ref_img
else:
raise ValueError('Error ! Unknown type of data given to set_orig()')
self.update_orig()
</DeepExtract>
if self._fields is not None:
<DeepExtract>
for (field_str, tex_fx, tex_fy) in zip(self._fields, self._tex_fx, self._tex_fy):
(field_x, field_y) = get_field(field_str, self._heights[0], self._widths[0])
tex_fx.set_array(pycuda.driver.matrix_to_array(field_x, 'C'))
tex_fy.set_array(pycuda.driver.matrix_to_array(field_y, 'C'))
for (stage, height, width) in zip(self._stages, self._heights, self._widths):
stage.set_fields(*self._get_fields(height, width))
</DeepExtract>
if self._mask is not None:
<DeepExtract>
for (stage, height, width) in zip(self._stages, self._heights, self._widths):
stage.set_mask(interp_nearest(self._mask, height, width))
</DeepExtract>
|
def set_img_size(self, img_size: Tuple[int, int]) -> None:
"""Sets the image shape, and calls the methods that need this information
for running."""
if 1 <= self._verbose:
print(f"[ GPUCorrel level {1} debug] {f'Setting master resolution: {img_size},'}")
if self._context is not None:
GPUCorrel.context = self._context
else:
pycuda.driver.init()
GPUCorrel.context = pycuda.tools.make_default_context()
src_txt = '\n texture<float, cudaTextureType2D, cudaReadModeElementType> texFx{0};\n texture<float, cudaTextureType2D, cudaReadModeElementType> texFy{0};\n __global__ void resample{0}(float* outX, float* outY, int x, int y)\n {{\n int idx = blockIdx.x*blockDim.x+threadIdx.x;\n int idy = blockIdx.y*blockDim.y+threadIdx.y;\n if(idx < x && idy < y)\n {{\n outX[idy*x+idx] = tex2D(texFx{0},(float)idx/x, (float)idy/y);\n outY[idy*x+idx] = tex2D(texFy{0},(float)idx/x, (float)idy/y);\n }}\n }}\n '
src = ''.join([src_txt.format(i) for i in range(self._n_fields)])
source_module = pycuda.compiler.SourceModule(src)
self._tex_fx = [source_module.get_texref(f'texFx{i}') for i in range(self._n_fields)]
self._tex_fy = [source_module.get_texref(f'texFy{i}') for i in range(self._n_fields)]
self._resample = [source_module.get_function(f'resample{i}') for i in range(self._n_fields)]
for (tex_fx, tex_fy, resample) in zip(self._tex_fx, self._tex_fy, self._resample):
resample.prepare('PPii', texrefs=[tex_fx, tex_fy])
for tex in chain(self._tex_fx, self._tex_fy):
tex.set_flags(pycuda.driver.TRSF_NORMALIZED_COORDINATES)
tex.set_filter_mode(pycuda.driver.filter_mode.LINEAR)
tex.set_address_mode(0, pycuda.driver.address_mode.BORDER)
tex.set_address_mode(1, pycuda.driver.address_mode.BORDER)
(height, width, *_) = img_size
self._heights = [round(height / self._resampling_factor ** i) for i in range(self._levels)]
self._widths = [round(width / self._resampling_factor ** i) for i in range(self._levels)]
self._stages = [CorrelStage(img_size=(height, width), verbose=self._verbose, n_fields=self._n_fields, iterations=self._iterations, mul=self._mul, kernel_file=self._kernel_file) for (i, (height, width)) in enumerate(zip(self._heights, self._widths))]
if self._ref_img is not None:
if isinstance(self._ref_img, np.ndarray):
self._debug(3, 'Setting original image from ndarray')
self.dev_orig.set(self._ref_img)
elif isinstance(self._ref_img, gpuarray.GPUArray):
self._debug(3, 'Setting original image from GPUArray')
self.dev_orig = self._ref_img
else:
raise ValueError('Error ! Unknown type of data given to set_orig()')
self.update_orig()
if self._fields is not None:
for (field_str, tex_fx, tex_fy) in zip(self._fields, self._tex_fx, self._tex_fy):
(field_x, field_y) = get_field(field_str, self._heights[0], self._widths[0])
tex_fx.set_array(pycuda.driver.matrix_to_array(field_x, 'C'))
tex_fy.set_array(pycuda.driver.matrix_to_array(field_y, 'C'))
for (stage, height, width) in zip(self._stages, self._heights, self._widths):
stage.set_fields(*self._get_fields(height, width))
if self._mask is not None:
for (stage, height, width) in zip(self._stages, self._heights, self._widths):
stage.set_mask(interp_nearest(self._mask, height, width))
|
crappy
|
positive
|
@register_model
def tf_mixnet_l(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""Creates a MixNet Large model. Tensorflow compatible variant
"""
default_cfg = default_cfgs['tf_mixnet_l']
kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
<DeepExtract>
arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
model = GenEfficientNet(_decode_arch_def(arch_def), num_classes=num_classes, stem_size=24, num_features=1536, channel_multiplier=1.3, bn_args=_resolve_bn_args(kwargs), act_fn=F.relu, **kwargs)
model = model
</DeepExtract>
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
@register_model
def tf_mixnet_l(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
"""Creates a MixNet Large model. Tensorflow compatible variant
"""
default_cfg = default_cfgs['tf_mixnet_l']
kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
model = GenEfficientNet(_decode_arch_def(arch_def), num_classes=num_classes, stem_size=24, num_features=1536, channel_multiplier=1.3, bn_args=_resolve_bn_args(kwargs), act_fn=F.relu, **kwargs)
model = model
model.default_cfg = default_cfg
if pretrained:
load_pretrained(model, default_cfg, num_classes, in_chans)
return model
|
DNA
|
positive
|
def request_vm(self, src_ip):
<DeepExtract>
self.guest_lock.acquire()
try:
usable_guests = self.get_guest_states(['used', 'using'])
for guest in usable_guests:
if src_ip in guest['client_ips']:
guest = guest
finally:
self.guest_lock.release()
guest = None
</DeepExtract>
if not guest:
<DeepExtract>
self.guest_lock.acquire()
try:
available_guests = self.get_guest_states(['available'])
for guest in available_guests:
guest = guest
finally:
self.guest_lock.release()
guest = None
</DeepExtract>
if not guest and self.share_guests:
<DeepExtract>
self.guest_lock.acquire()
try:
least_conn = None
usable_guests = self.get_guest_states(['using', 'used'])
for guest in usable_guests:
if not least_conn or guest['connected'] < least_conn['connected']:
least_conn = guest
guest = least_conn
finally:
self.guest_lock.release()
</DeepExtract>
if not guest:
if self.any_vm_up:
log.msg('Inconsistent state in pool, restarting...')
<DeepExtract>
import libvirt
log.msg(eventid='cowrie.backend_pool.service', format='Trying pool clean stop')
if self.loop_next_call:
self.loop_next_call.cancel()
for guest in self.guests:
self.qemu.destroy_guest(guest['domain'], guest['snapshot'])
self.qemu.destroy_all_cowrie()
if not self.local_pool and self.use_nat or self.pool_only:
log.msg(eventid='cowrie.backend_pool.service', format='Free all NAT bindings')
self.nat_service.free_all()
try:
self.qemu.stop_backend()
except libvirt.libvirtError:
print('Not connected to QEMU')
</DeepExtract>
raise NoAvailableVMs()
guest['prev_state'] = guest['state']
guest['state'] = 'using'
guest['connected'] += 1
guest['client_ips'].add(src_ip)
return (guest['id'], guest['guest_ip'], guest['snapshot'])
|
def request_vm(self, src_ip):
self.guest_lock.acquire()
try:
usable_guests = self.get_guest_states(['used', 'using'])
for guest in usable_guests:
if src_ip in guest['client_ips']:
guest = guest
finally:
self.guest_lock.release()
guest = None
if not guest:
self.guest_lock.acquire()
try:
available_guests = self.get_guest_states(['available'])
for guest in available_guests:
guest = guest
finally:
self.guest_lock.release()
guest = None
if not guest and self.share_guests:
self.guest_lock.acquire()
try:
least_conn = None
usable_guests = self.get_guest_states(['using', 'used'])
for guest in usable_guests:
if not least_conn or guest['connected'] < least_conn['connected']:
least_conn = guest
guest = least_conn
finally:
self.guest_lock.release()
if not guest:
if self.any_vm_up:
log.msg('Inconsistent state in pool, restarting...')
import libvirt
log.msg(eventid='cowrie.backend_pool.service', format='Trying pool clean stop')
if self.loop_next_call:
self.loop_next_call.cancel()
for guest in self.guests:
self.qemu.destroy_guest(guest['domain'], guest['snapshot'])
self.qemu.destroy_all_cowrie()
if not self.local_pool and self.use_nat or self.pool_only:
log.msg(eventid='cowrie.backend_pool.service', format='Free all NAT bindings')
self.nat_service.free_all()
try:
self.qemu.stop_backend()
except libvirt.libvirtError:
print('Not connected to QEMU')
raise NoAvailableVMs()
guest['prev_state'] = guest['state']
guest['state'] = 'using'
guest['connected'] += 1
guest['client_ips'].add(src_ip)
return (guest['id'], guest['guest_ip'], guest['snapshot'])
|
cowrie
|
positive
|
def create_snapshot(params):
<DeepExtract>
if 'create_db_cluster_snapshot' == 'copy_db_cluster_snapshot':
params['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier']
required_options = get_boto3_client_method_parameters(client, 'create_db_cluster_snapshot', required=True)
if any((params.get(k) is None for k in required_options)):
module.fail_json(msg='To {0} requires the parameters: {1}'.format(get_rds_method_attribute('create_db_cluster_snapshot', module).operation_description, required_options))
options = get_boto3_client_method_parameters(client, 'create_db_cluster_snapshot')
params = dict(((k, v) for (k, v) in params.items() if k in options and v is not None))
method_params = params
</DeepExtract>
if method_params.get('Tags'):
method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
(_snapshot, changed) = call_method(client, module, 'create_db_cluster_snapshot', method_params)
return changed
|
def create_snapshot(params):
if 'create_db_cluster_snapshot' == 'copy_db_cluster_snapshot':
params['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier']
required_options = get_boto3_client_method_parameters(client, 'create_db_cluster_snapshot', required=True)
if any((params.get(k) is None for k in required_options)):
module.fail_json(msg='To {0} requires the parameters: {1}'.format(get_rds_method_attribute('create_db_cluster_snapshot', module).operation_description, required_options))
options = get_boto3_client_method_parameters(client, 'create_db_cluster_snapshot')
params = dict(((k, v) for (k, v) in params.items() if k in options and v is not None))
method_params = params
if method_params.get('Tags'):
method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags'])
(_snapshot, changed) = call_method(client, module, 'create_db_cluster_snapshot', method_params)
return changed
|
amazon.aws
|
positive
|
def _src_deps(src_fn):
fd = os.open(src_fn, os.O_RDONLY)
try:
h = self._rpm_ts.hdrFromFdno(fd)
except rpm.error as e:
if str(e) == 'error reading package header':
e = _("Failed to open: '%s', not a valid source rpm file.") % src_fn
os.close(fd)
raise dnf.exceptions.Error(e)
os.close(fd)
ds = h.dsFromHeader('requirename')
done = True
for dep in ds:
<DeepExtract>
reldep_str = dep.DNEVR()[2:]
</DeepExtract>
if reldep_str.startswith('rpmlib('):
continue
done &= self._install(reldep_str)
if not done:
err = _('Not all dependencies satisfied')
raise dnf.exceptions.Error(err)
if self.opts.define:
logger.warning(_('Warning: -D or --define arguments have no meaning for source rpm packages.'))
|
def _src_deps(src_fn):
fd = os.open(src_fn, os.O_RDONLY)
try:
h = self._rpm_ts.hdrFromFdno(fd)
except rpm.error as e:
if str(e) == 'error reading package header':
e = _("Failed to open: '%s', not a valid source rpm file.") % src_fn
os.close(fd)
raise dnf.exceptions.Error(e)
os.close(fd)
ds = h.dsFromHeader('requirename')
done = True
for dep in ds:
reldep_str = dep.DNEVR()[2:]
if reldep_str.startswith('rpmlib('):
continue
done &= self._install(reldep_str)
if not done:
err = _('Not all dependencies satisfied')
raise dnf.exceptions.Error(err)
if self.opts.define:
logger.warning(_('Warning: -D or --define arguments have no meaning for source rpm packages.'))
|
dnf-plugins-core
|
positive
|
def get_global_long_short_accounts(self, symbol, period, startTime, endTime, limit):
builder = UrlParamsBuilder()
builder.put_url('symbol', symbol)
builder.put_url('period', period)
builder.put_url('startTime', startTime)
builder.put_url('endTime', endTime)
builder.put_url('limit', limit)
<DeepExtract>
request = RestApiRequest()
request.method = 'GET'
request.host = self.__server_url
request.header.update({'Content-Type': 'application/json'})
request.url = '/futures/data/globalLongShortAccountRatio' + '?' + builder.build_url()
request = request
</DeepExtract>
def parse(json_wrapper):
result = list()
data_list = json_wrapper.convert_2_array()
for item in data_list.get_items():
element = LongShortRatio.json_parse(item)
result.append(element)
return result
request.json_parser = parse
return request
|
def get_global_long_short_accounts(self, symbol, period, startTime, endTime, limit):
builder = UrlParamsBuilder()
builder.put_url('symbol', symbol)
builder.put_url('period', period)
builder.put_url('startTime', startTime)
builder.put_url('endTime', endTime)
builder.put_url('limit', limit)
request = RestApiRequest()
request.method = 'GET'
request.host = self.__server_url
request.header.update({'Content-Type': 'application/json'})
request.url = '/futures/data/globalLongShortAccountRatio' + '?' + builder.build_url()
request = request
def parse(json_wrapper):
result = list()
data_list = json_wrapper.convert_2_array()
for item in data_list.get_items():
element = LongShortRatio.json_parse(item)
result.append(element)
return result
request.json_parser = parse
return request
|
Binance_Futures_python
|
positive
|
def get_inference_from_file(lineProb_st):
lineProb = [float(x) for x in lineProb_st]
if FLAGS.Cmap == 'CancerType':
NumberOfClasses = len(lineProb)
class_all = []
sum_class = 0
for nC in range(1, NumberOfClasses):
class_all.append(float(lineProb[nC]))
sum_class = sum_class + float(lineProb[nC])
for nC in range(NumberOfClasses - 1):
class_all[nC] = class_all[nC] / sum_class
current_score = max(class_all)
oClass = class_all.index(max(class_all)) + 1
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
if len(thresholds) != len(class_all):
print('Error: There must be one threshold per class:')
probDiff = []
for nC in range(len(class_all)):
probDiff.append(class_all[nC] - thresholds[nC])
oClass = probDiff.index(max(probDiff)) + 1
current_score = class_all[oClass - 1]
score_correction = thresholds[oClass - 1]
else:
score_correction = 1.0 / len(class_all)
if oClass == 1:
if len(class_all) == 2:
c = mcolors.ColorConverter().to_rgb
<DeepExtract>
[c('white'), c('red')] = [(None,) * 3, 0.0] + list([c('white'), c('red')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('red')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('red')][i - 1]
(r2, g2, b2) = [c('white'), c('red')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
</DeepExtract>
else:
cmap = plt.get_cmap('binary')
elif oClass == 2:
if len(class_all) == 2:
c = mcolors.ColorConverter().to_rgb
<DeepExtract>
[c('white'), c('blue')] = [(None,) * 3, 0.0] + list([c('white'), c('blue')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('blue')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('blue')][i - 1]
(r2, g2, b2) = [c('white'), c('blue')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
</DeepExtract>
else:
c = mcolors.ColorConverter().to_rgb
<DeepExtract>
[c('white'), c('green')] = [(None,) * 3, 0.0] + list([c('white'), c('green')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('green')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('green')][i - 1]
(r2, g2, b2) = [c('white'), c('green')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
</DeepExtract>
elif oClass == 3:
c = mcolors.ColorConverter().to_rgb
<DeepExtract>
[c('white'), c('blue')] = [(None,) * 3, 0.0] + list([c('white'), c('blue')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('blue')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('blue')][i - 1]
(r2, g2, b2) = [c('white'), c('blue')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
</DeepExtract>
elif oClass == 4:
c = mcolors.ColorConverter().to_rgb
<DeepExtract>
[c('white'), c('red')] = [(None,) * 3, 0.0] + list([c('white'), c('red')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('red')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('red')][i - 1]
(r2, g2, b2) = [c('white'), c('red')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
</DeepExtract>
cmap = plt.get_cmap('Oranges')
elif oClass == 5:
cmap = plt.get_cmap('Greens')
else:
cmap = plt.get_cmap('Purples')
print(lineProb)
print(oClass, current_score, (current_score - score_correction) / (1.0 - score_correction), [class_all[1], class_all[2], class_all[3]])
return (oClass, cmap, (current_score - score_correction) / (1.0 - score_correction), [class_all[0], class_all[1], class_all[2], class_all[3]])
|
def get_inference_from_file(lineProb_st):
lineProb = [float(x) for x in lineProb_st]
if FLAGS.Cmap == 'CancerType':
NumberOfClasses = len(lineProb)
class_all = []
sum_class = 0
for nC in range(1, NumberOfClasses):
class_all.append(float(lineProb[nC]))
sum_class = sum_class + float(lineProb[nC])
for nC in range(NumberOfClasses - 1):
class_all[nC] = class_all[nC] / sum_class
current_score = max(class_all)
oClass = class_all.index(max(class_all)) + 1
if FLAGS.thresholds is not None:
thresholds = FLAGS.thresholds
thresholds = [float(x) for x in thresholds.split(',')]
if len(thresholds) != len(class_all):
print('Error: There must be one threshold per class:')
probDiff = []
for nC in range(len(class_all)):
probDiff.append(class_all[nC] - thresholds[nC])
oClass = probDiff.index(max(probDiff)) + 1
current_score = class_all[oClass - 1]
score_correction = thresholds[oClass - 1]
else:
score_correction = 1.0 / len(class_all)
if oClass == 1:
if len(class_all) == 2:
c = mcolors.ColorConverter().to_rgb
[c('white'), c('red')] = [(None,) * 3, 0.0] + list([c('white'), c('red')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('red')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('red')][i - 1]
(r2, g2, b2) = [c('white'), c('red')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
else:
cmap = plt.get_cmap('binary')
elif oClass == 2:
if len(class_all) == 2:
c = mcolors.ColorConverter().to_rgb
[c('white'), c('blue')] = [(None,) * 3, 0.0] + list([c('white'), c('blue')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('blue')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('blue')][i - 1]
(r2, g2, b2) = [c('white'), c('blue')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
else:
c = mcolors.ColorConverter().to_rgb
[c('white'), c('green')] = [(None,) * 3, 0.0] + list([c('white'), c('green')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('green')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('green')][i - 1]
(r2, g2, b2) = [c('white'), c('green')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
elif oClass == 3:
c = mcolors.ColorConverter().to_rgb
[c('white'), c('blue')] = [(None,) * 3, 0.0] + list([c('white'), c('blue')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('blue')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('blue')][i - 1]
(r2, g2, b2) = [c('white'), c('blue')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
elif oClass == 4:
c = mcolors.ColorConverter().to_rgb
[c('white'), c('red')] = [(None,) * 3, 0.0] + list([c('white'), c('red')]) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for (i, item) in enumerate([c('white'), c('red')]):
if isinstance(item, float):
(r1, g1, b1) = [c('white'), c('red')][i - 1]
(r2, g2, b2) = [c('white'), c('red')][i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
cmap = mcolors.LinearSegmentedColormap('CustomMap', cdict)
cmap = plt.get_cmap('Oranges')
elif oClass == 5:
cmap = plt.get_cmap('Greens')
else:
cmap = plt.get_cmap('Purples')
print(lineProb)
print(oClass, current_score, (current_score - score_correction) / (1.0 - score_correction), [class_all[1], class_all[2], class_all[3]])
return (oClass, cmap, (current_score - score_correction) / (1.0 - score_correction), [class_all[0], class_all[1], class_all[2], class_all[3]])
|
DeepPATH
|
positive
|
@micropython.viper
def flash_wait_status(n: int):
retry = n
mask = 1
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
swspi.write(read_status)
swspi.readinto(status)
while retry > 0:
swspi.readinto(status)
if int(status[0]) & mask == 0:
break
sleep_ms(1)
retry -= 1
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
<DeepExtract>
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
</DeepExtract>
if retry <= 0:
print('error %d flash status 0x%02X & 0x%02X != 0' % (n, status[0], mask))
|
@micropython.viper
def flash_wait_status(n: int):
retry = n
mask = 1
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
swspi.write(read_status)
swspi.readinto(status)
while retry > 0:
swspi.readinto(status)
if int(status[0]) & mask == 0:
break
sleep_ms(1)
retry -= 1
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if 0:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if 1:
tms.on()
else:
tms.off()
tck.off()
tck.on()
if retry <= 0:
print('error %d flash status 0x%02X & 0x%02X != 0' % (n, status[0], mask))
|
esp32ecp5
|
positive
|
@mock.patch.object(super_tuner.Tuner, '__init__', autospec=True)
@mock.patch.object(tf.summary, 'create_file_writer', autospec=True)
@mock.patch.object(hparams_api, 'hparams', autospec=True)
def test_add_logging_not_specified(self, mock_hparams, mock_create_file_writer, mock_super_tuner):
<DeepExtract>
directory = directory or self._remote_dir
remote_tuner = tuner.DistributingCloudTuner(hypermodel=build_model, objective=None, study_config=self._study_config, hyperparameters=None, max_trials=max_trials, project_id=self._project_id, region=self._region, directory=directory, study_id=self._study_id, container_uri=self._container_uri)
</DeepExtract>
callbacks = []
remote_tuner._add_logging(callbacks, self._test_trial)
expected_logdir = os.path.join(remote_tuner.directory, self._test_trial.trial_id, 'logs')
self.assertLen(callbacks, 1)
self.assertEqual(callbacks[0].log_dir, expected_logdir)
|
@mock.patch.object(super_tuner.Tuner, '__init__', autospec=True)
@mock.patch.object(tf.summary, 'create_file_writer', autospec=True)
@mock.patch.object(hparams_api, 'hparams', autospec=True)
def test_add_logging_not_specified(self, mock_hparams, mock_create_file_writer, mock_super_tuner):
directory = directory or self._remote_dir
remote_tuner = tuner.DistributingCloudTuner(hypermodel=build_model, objective=None, study_config=self._study_config, hyperparameters=None, max_trials=max_trials, project_id=self._project_id, region=self._region, directory=directory, study_id=self._study_id, container_uri=self._container_uri)
callbacks = []
remote_tuner._add_logging(callbacks, self._test_trial)
expected_logdir = os.path.join(remote_tuner.directory, self._test_trial.trial_id, 'logs')
self.assertLen(callbacks, 1)
self.assertEqual(callbacks[0].log_dir, expected_logdir)
|
cloud
|
positive
|
def validation_step(self, batch):
<DeepExtract>
raise NotImplementedError
</DeepExtract>
<DeepExtract>
def has_one_axis(X):
return hasattr('loss', 'ndim') and 'loss'.ndim == 1 or (isinstance('loss', list) and (not hasattr('loss'[0], '__len__')))
if has_one_axis('loss'):
'loss' = ['loss']
if l is None:
('loss', l) = ([[]] * len('loss'), 'loss')
elif has_one_axis(l):
l = [l]
if len('loss') != len(l):
'loss' = 'loss' * len(l)
set_figsize(figsize)
if axes is None:
axes = d2l.plt.gca()
axes.cla()
for (x, y, fmt) in zip('loss', l, fmts):
axes.plot(x, y, fmt) if len(x) else axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
</DeepExtract>
|
def validation_step(self, batch):
raise NotImplementedError
def has_one_axis(X):
return hasattr('loss', 'ndim') and 'loss'.ndim == 1 or (isinstance('loss', list) and (not hasattr('loss'[0], '__len__')))
if has_one_axis('loss'):
'loss' = ['loss']
if l is None:
('loss', l) = ([[]] * len('loss'), 'loss')
elif has_one_axis(l):
l = [l]
if len('loss') != len(l):
'loss' = 'loss' * len(l)
set_figsize(figsize)
if axes is None:
axes = d2l.plt.gca()
axes.cla()
for (x, y, fmt) in zip('loss', l, fmts):
axes.plot(x, y, fmt) if len(x) else axes.plot(y, fmt)
set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
|
d2l-en
|
positive
|
def _remove_team_resources(namespace: str, team_spec: str, logger: kopf.Logger, **_: Any):
v1 = CoreV1Api()
logger.info(f'_remove_team_resources looking with orbit/label={team_spec}')
label_selector = f'orbit/team={team_spec}'
all_namespaces = v1.list_namespace(label_selector=label_selector).to_dict()
all_ns = [item.get('metadata').get('name') for item in all_namespaces['items'] if item.get('metadata', {}).get('name')]
custom_object_list = [['sagemaker.aws.amazon.com', 'v1', 'hyperparametertuningjobs', 'trainingJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'trainingjobs', 'trainingJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'batchtransformjobs', 'transformJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'hostingdeployments', 'status'], ['kubeflow.org', 'v1', 'notebooks', 'NA'], ['kubeflow.org', 'v1', 'profile', 'NA'], ['batch', 'v1', 'jobs', 'NA'], ['apps', 'v1', 'deployments', 'NA'], ['apps', 'v1', 'statefulsets', 'NA']]
for namespace in all_ns:
logger.info(f'Looking at NS {namespace}')
for co in custom_object_list:
<DeepExtract>
logger.info(f'Deleting {co[2]}.{co[0]} in ns {namespace}')
co = CustomObjectsApi()
try:
resp = co.delete_collection_namespaced_custom_object(group=co[0], version=co[1], namespace=namespace, plural=co[2], grace_period_seconds=0, propagation_policy='Background', pretty='true', async_req=use_async, body=V1DeleteOptions())
return resp
except ApiException as e:
logger.warn('calling CustomObjectsApi->delete_collection_namespaced_custom_object: %s\n' % e)
logger.warn('Assume it did not exist')
</DeepExtract>
<DeepExtract>
logger.info(f'Deleting ALL PODS in ns {namespace}')
api = CoreV1Api()
try:
api.delete_collection_namespaced_pod(namespace=namespace, async_req=use_async, grace_period_seconds=0, propagation_policy='Background', body=V1DeleteOptions())
except ApiException as e:
logger.warn('calling CustomObjectsApi->delete_collection_namespaced_pod: %s\n' % e)
</DeepExtract>
for co in custom_object_list[0:4]:
<DeepExtract>
logger.info(f'_patch_and_delete_stubborn_custom_resources for {co[2]}.{co[0]} in namespace {namespace}')
co = CustomObjectsApi()
resp = co.list_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace)
failed_res = [item.get('metadata').get('name') for item in resp['items'] if item.get('status', {}).get(co[3]) in ['Failed', 'Completed', 'InProgress']]
for item in failed_res:
try:
logger.info(f'Patching item {item} in {co[2]}.{co[0]}')
patch = json.loads('{"metadata":{"finalizers":[]}}')
co.patch_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace, name=item, body=patch)
logger.info(f'Deleting item {item} in {co[2]}.{co[0]}')
co.delete_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace, name=item)
except ApiException as e:
logger.warn('Trying to patch and delete failed: %s\n' % e)
</DeepExtract>
|
def _remove_team_resources(namespace: str, team_spec: str, logger: kopf.Logger, **_: Any):
v1 = CoreV1Api()
logger.info(f'_remove_team_resources looking with orbit/label={team_spec}')
label_selector = f'orbit/team={team_spec}'
all_namespaces = v1.list_namespace(label_selector=label_selector).to_dict()
all_ns = [item.get('metadata').get('name') for item in all_namespaces['items'] if item.get('metadata', {}).get('name')]
custom_object_list = [['sagemaker.aws.amazon.com', 'v1', 'hyperparametertuningjobs', 'trainingJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'trainingjobs', 'trainingJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'batchtransformjobs', 'transformJobStatus'], ['sagemaker.aws.amazon.com', 'v1', 'hostingdeployments', 'status'], ['kubeflow.org', 'v1', 'notebooks', 'NA'], ['kubeflow.org', 'v1', 'profile', 'NA'], ['batch', 'v1', 'jobs', 'NA'], ['apps', 'v1', 'deployments', 'NA'], ['apps', 'v1', 'statefulsets', 'NA']]
for namespace in all_ns:
logger.info(f'Looking at NS {namespace}')
for co in custom_object_list:
logger.info(f'Deleting {co[2]}.{co[0]} in ns {namespace}')
co = CustomObjectsApi()
try:
resp = co.delete_collection_namespaced_custom_object(group=co[0], version=co[1], namespace=namespace, plural=co[2], grace_period_seconds=0, propagation_policy='Background', pretty='true', async_req=use_async, body=V1DeleteOptions())
return resp
except ApiException as e:
logger.warn('calling CustomObjectsApi->delete_collection_namespaced_custom_object: %s\n' % e)
logger.warn('Assume it did not exist')
logger.info(f'Deleting ALL PODS in ns {namespace}')
api = CoreV1Api()
try:
api.delete_collection_namespaced_pod(namespace=namespace, async_req=use_async, grace_period_seconds=0, propagation_policy='Background', body=V1DeleteOptions())
except ApiException as e:
logger.warn('calling CustomObjectsApi->delete_collection_namespaced_pod: %s\n' % e)
for co in custom_object_list[0:4]:
logger.info(f'_patch_and_delete_stubborn_custom_resources for {co[2]}.{co[0]} in namespace {namespace}')
co = CustomObjectsApi()
resp = co.list_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace)
failed_res = [item.get('metadata').get('name') for item in resp['items'] if item.get('status', {}).get(co[3]) in ['Failed', 'Completed', 'InProgress']]
for item in failed_res:
try:
logger.info(f'Patching item {item} in {co[2]}.{co[0]}')
patch = json.loads('{"metadata":{"finalizers":[]}}')
co.patch_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace, name=item, body=patch)
logger.info(f'Deleting item {item} in {co[2]}.{co[0]}')
co.delete_namespaced_custom_object(group=co[0], version=co[1], plural=co[2], namespace=namespace, name=item)
except ApiException as e:
logger.warn('Trying to patch and delete failed: %s\n' % e)
|
aws-orbit-workbench
|
positive
|
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
<DeepExtract>
if isinstance(kernel_size, (list, tuple)):
if len(kernel_size) != 2:
raise ValueError('Must be a list with 2 elements: %s' % kernel_size)
(kernel_h, kernel_w) = (int(kernel_size[0]), int(kernel_size[1]))
if isinstance(kernel_size, int):
(kernel_h, kernel_w) = (int(kernel_size), int(kernel_size))
if isinstance(kernel_size, tf.TensorShape):
if len(kernel_size) == 2:
(kernel_h, kernel_w) = (kernel_size[0], kernel_size[1])
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of length 2')
</DeepExtract>
<DeepExtract>
if isinstance(stride, (list, tuple)):
if len(stride) != 2:
raise ValueError('Must be a list with 2 elements: %s' % stride)
(stride_h, stride_w) = (int(stride[0]), int(stride[1]))
if isinstance(stride, int):
(stride_h, stride_w) = (int(stride), int(stride))
if isinstance(stride, tf.TensorShape):
if len(stride) == 2:
(stride_h, stride_w) = (stride[0], stride[1])
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of length 2')
</DeepExtract>
return tf.nn.avg_pool(inputs, ksize=[1, kernel_h, kernel_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding)
|
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
if isinstance(kernel_size, (list, tuple)):
if len(kernel_size) != 2:
raise ValueError('Must be a list with 2 elements: %s' % kernel_size)
(kernel_h, kernel_w) = (int(kernel_size[0]), int(kernel_size[1]))
if isinstance(kernel_size, int):
(kernel_h, kernel_w) = (int(kernel_size), int(kernel_size))
if isinstance(kernel_size, tf.TensorShape):
if len(kernel_size) == 2:
(kernel_h, kernel_w) = (kernel_size[0], kernel_size[1])
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of length 2')
if isinstance(stride, (list, tuple)):
if len(stride) != 2:
raise ValueError('Must be a list with 2 elements: %s' % stride)
(stride_h, stride_w) = (int(stride[0]), int(stride[1]))
if isinstance(stride, int):
(stride_h, stride_w) = (int(stride), int(stride))
if isinstance(stride, tf.TensorShape):
if len(stride) == 2:
(stride_h, stride_w) = (stride[0], stride[1])
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of length 2')
return tf.nn.avg_pool(inputs, ksize=[1, kernel_h, kernel_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding)
|
deeplearning-benchmark
|
positive
|
@pytest.mark.usefixtures('products')
def test_get_float_average(get_product_flat):
<DeepExtract>
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
</DeepExtract>
<DeepExtract>
assert sorted(data, key=str) == sorted([[2.3]], key=str)
</DeepExtract>
|
@pytest.mark.usefixtures('products')
def test_get_float_average(get_product_flat):
def helper(queries, *args, **kwargs):
res = 1(queries, *args, **kwargs)
if res['body'] == []:
assert res['cols'] == []
assert res['rows'] == []
data = []
else:
assert res['cols'] == [[]]
data = [r + b for (r, b) in zip(res['rows'], res['body'][0])]
data = helper
assert sorted(data, key=str) == sorted([[2.3]], key=str)
|
django-data-browser
|
positive
|
def test_metadata(tmpdir):
<DeepExtract>
p = tmpdir.mkdir('cuckoo').join('data.json')
p.write(json.dumps({'behavior': {}, 'info': {'added': 1553807600.200415, 'started': 1553810186.098672, 'duration': 325, 'ended': 1553810511.668111, 'owner': '', 'score': 10.4, 'id': 1003314, 'category': 'file', 'git': {'head': '03731c4c136532389e93239ac6c3ad38441f81a7', 'fetch_head': '03731c4c136532389e93239ac6c3ad38441f81a7'}, 'monitor': '22c39cbb35f4d916477b47453673bc50bcd0df09', 'package': 'exe', 'route': 'internet', 'custom': '', 'machine': {'status': 'stopped', 'name': 'win7x6415', 'label': 'win7x6415', 'manager': 'VirtualBox', 'started_on': '2019-03-28 21:56:26', 'shutdown_on': '2019-03-28 22:01:47'}, 'platform': 'windows', 'version': '2.0.6', 'options': 'procmemdump=yes'}, 'target': {'category': 'file', 'file': {'sha1': '8338f79279b7126791e0937d1c3933f259e5d658', 'name': 'It6QworVAgY.exe', 'type': 'PE32 executable (GUI) Intel 80386, for MS Windows', 'sha256': 'c1db4b2578729a1faede84d2735eb8463bfd2c6b15d2fdf2de7a89f1954d0dfb', 'urls': ['http://ocsp.usertrust.com0'], 'crc32': '660E35BC', 'path': '/srv/cuckoo/cwd/storage/binaries/c1db4b2578729a1faede84d2735eb8463bfd2c6b15d2fdf2de7a89f1954d0dfb', 'ssdeep': '3072:RNkhoRdoQbxSTcbrh82bQZfR3pKHJL1cx0W5yOpIX:RNgo3oInbQZp5MJL1cs7', 'size': 206088, 'sha512': '8f705313d7c240e72967ac3dfc0d9e3d72090e39e51dd05e803a439a78430946945f87aa596112461aedee68a472a7880a25bb6d5e019615162fa6c35a8108b2', 'md5': '44b696079356579d250f716a37ca9b17'}}}))
f = p
</DeepExtract>
assert CuckooReport(f).metadata() == {'machine': 'win7x6415', 'package': 'exe', 'score': 10.4, 'report_id': 1003314, 'category': 'file', 'name': 'It6QworVAgY.exe', 'type': 'PE32 executable (GUI) Intel 80386, for MS Windows'}
|
def test_metadata(tmpdir):
p = tmpdir.mkdir('cuckoo').join('data.json')
p.write(json.dumps({'behavior': {}, 'info': {'added': 1553807600.200415, 'started': 1553810186.098672, 'duration': 325, 'ended': 1553810511.668111, 'owner': '', 'score': 10.4, 'id': 1003314, 'category': 'file', 'git': {'head': '03731c4c136532389e93239ac6c3ad38441f81a7', 'fetch_head': '03731c4c136532389e93239ac6c3ad38441f81a7'}, 'monitor': '22c39cbb35f4d916477b47453673bc50bcd0df09', 'package': 'exe', 'route': 'internet', 'custom': '', 'machine': {'status': 'stopped', 'name': 'win7x6415', 'label': 'win7x6415', 'manager': 'VirtualBox', 'started_on': '2019-03-28 21:56:26', 'shutdown_on': '2019-03-28 22:01:47'}, 'platform': 'windows', 'version': '2.0.6', 'options': 'procmemdump=yes'}, 'target': {'category': 'file', 'file': {'sha1': '8338f79279b7126791e0937d1c3933f259e5d658', 'name': 'It6QworVAgY.exe', 'type': 'PE32 executable (GUI) Intel 80386, for MS Windows', 'sha256': 'c1db4b2578729a1faede84d2735eb8463bfd2c6b15d2fdf2de7a89f1954d0dfb', 'urls': ['http://ocsp.usertrust.com0'], 'crc32': '660E35BC', 'path': '/srv/cuckoo/cwd/storage/binaries/c1db4b2578729a1faede84d2735eb8463bfd2c6b15d2fdf2de7a89f1954d0dfb', 'ssdeep': '3072:RNkhoRdoQbxSTcbrh82bQZfR3pKHJL1cx0W5yOpIX:RNgo3oInbQZp5MJL1cs7', 'size': 206088, 'sha512': '8f705313d7c240e72967ac3dfc0d9e3d72090e39e51dd05e803a439a78430946945f87aa596112461aedee68a472a7880a25bb6d5e019615162fa6c35a8108b2', 'md5': '44b696079356579d250f716a37ca9b17'}}}))
f = p
assert CuckooReport(f).metadata() == {'machine': 'win7x6415', 'package': 'exe', 'score': 10.4, 'report_id': 1003314, 'category': 'file', 'name': 'It6QworVAgY.exe', 'type': 'PE32 executable (GUI) Intel 80386, for MS Windows'}
|
beagle
|
positive
|
def getuvdata(cno, disk=1):
"""
get AIPSUVData object at a specified AIPS Catalogue number and AIPS Disk.
Args:
cno (int):
AIPS Catalogue Number
disk (int, default=1):
Number of AIPS Disk
Returns:
AIPSUVData object
"""
<DeepExtract>
if AIPS.userno == 0:
raise ValueError('Please set AIPS USER NO with setuser(XX) or AIPS.userno=XX')
try:
catlist = AIPSCat()[disk]
except KeyError:
raise ValueError('disk=%d does not exist.' % disk)
cat = catlist
if False:
if len(catlist) == 0:
if False:
print('pcat: disk=%d is empty.' % disk)
else:
for catdata in catlist:
print('%3d: %12s.%6s.%3d.%3d %s %s %s' % (catdata['cno'], catdata['name'], catdata['klass'], catdata['seq'], disk, catdata['type'], catdata['time'], catdata['date']))
if True:
cat = catlist
</DeepExtract>
isdata = False
for catdata in cat:
if catdata['cno'] == cno:
isdata = True
break
if not isdata:
raise ValueError('No data at cno=%d and disk=%d' % (cno, disk))
return AIPSUVData(catdata['name'], catdata['klass'], disk, catdata['seq'])
|
def getuvdata(cno, disk=1):
"""
get AIPSUVData object at a specified AIPS Catalogue number and AIPS Disk.
Args:
cno (int):
AIPS Catalogue Number
disk (int, default=1):
Number of AIPS Disk
Returns:
AIPSUVData object
"""
if AIPS.userno == 0:
raise ValueError('Please set AIPS USER NO with setuser(XX) or AIPS.userno=XX')
try:
catlist = AIPSCat()[disk]
except KeyError:
raise ValueError('disk=%d does not exist.' % disk)
cat = catlist
if False:
if len(catlist) == 0:
if False:
print('pcat: disk=%d is empty.' % disk)
else:
for catdata in catlist:
print('%3d: %12s.%6s.%3d.%3d %s %s %s' % (catdata['cno'], catdata['name'], catdata['klass'], catdata['seq'], disk, catdata['type'], catdata['time'], catdata['date']))
if True:
cat = catlist
isdata = False
for catdata in cat:
if catdata['cno'] == cno:
isdata = True
break
if not isdata:
raise ValueError('No data at cno=%d and disk=%d' % (cno, disk))
return AIPSUVData(catdata['name'], catdata['klass'], disk, catdata['seq'])
|
eat
|
positive
|
@tracer.capture_method
def on_order_deleted(order: dict):
"""
Process an OrderDeleted event
"""
order_id = order['orderId']
<DeepExtract>
res = table.get_item(Key={'orderId': order_id, 'productId': METADATA_KEY})
metadata = res.get('Item', None)
</DeepExtract>
if metadata is None or metadata['status'] != 'NEW':
logger.info({'message': 'Trying to delete packaging request for inexisting order {}'.format(order_id), 'orderId': order_id})
return
logger.info({'message': 'Delete packaging request for order {}'.format(order_id), 'orderId': order_id})
<DeepExtract>
count = 0
with table.batch_writer() as batch:
for product in order['products'] or get_products(order_id):
if product['productId'] == METADATA_KEY:
continue
count += 1
batch.delete_item(Key={'orderId': order_id, 'productId': product['productId']})
logger.debug({'message': 'Deleting product {} for order {}'.format(product['productId'], order_id), 'operation': 'delete', 'product': product, 'orderId': order_id})
logger.info({'message': 'Deleting {} products for order {}'.format(count, order_id), 'operation': 'delete', 'orderId': order_id, 'productCount': count})
</DeepExtract>
<DeepExtract>
table.delete_item(Key={'orderId': order_id, 'productId': METADATA_KEY})
</DeepExtract>
|
@tracer.capture_method
def on_order_deleted(order: dict):
"""
Process an OrderDeleted event
"""
order_id = order['orderId']
res = table.get_item(Key={'orderId': order_id, 'productId': METADATA_KEY})
metadata = res.get('Item', None)
if metadata is None or metadata['status'] != 'NEW':
logger.info({'message': 'Trying to delete packaging request for inexisting order {}'.format(order_id), 'orderId': order_id})
return
logger.info({'message': 'Delete packaging request for order {}'.format(order_id), 'orderId': order_id})
count = 0
with table.batch_writer() as batch:
for product in order['products'] or get_products(order_id):
if product['productId'] == METADATA_KEY:
continue
count += 1
batch.delete_item(Key={'orderId': order_id, 'productId': product['productId']})
logger.debug({'message': 'Deleting product {} for order {}'.format(product['productId'], order_id), 'operation': 'delete', 'product': product, 'orderId': order_id})
logger.info({'message': 'Deleting {} products for order {}'.format(count, order_id), 'operation': 'delete', 'orderId': order_id, 'productCount': count})
table.delete_item(Key={'orderId': order_id, 'productId': METADATA_KEY})
|
aws-serverless-ecommerce-platform
|
positive
|
def test_terminate(self):
<DeepExtract>
which_exe = shutil.which(self.mpirun.launch_command)
self.assertTrue(which_exe is not None, f"'{self.mpirun.launch_command}' not in PATH")
</DeepExtract>
self.mpirun.start(os.getcwd(), self.script_output)
start = time.time()
self.mpirun.terminate()
self.mpirun.wait()
end = time.time()
self.assertLessEqual(end - start, self.sleep_sec)
|
def test_terminate(self):
which_exe = shutil.which(self.mpirun.launch_command)
self.assertTrue(which_exe is not None, f"'{self.mpirun.launch_command}' not in PATH")
self.mpirun.start(os.getcwd(), self.script_output)
start = time.time()
self.mpirun.terminate()
self.mpirun.wait()
end = time.time()
self.assertLessEqual(end - start, self.sleep_sec)
|
balsam
|
positive
|
def _make_jobs(self, channel=1):
"""Scan the instrument for available sources of data construct a list of jobs"""
encoding_table = {WaveType.MATH: ('FPBinary', 16, 'f'), WaveType.DIGITAL: ('RIBinary', 16, 'h'), WaveType.ANALOG: ('RIBinary', 16, 'h')}
channel_table = {WaveType.MATH: lambda x: x, WaveType.DIGITAL: lambda x: '_'.join([x.split('_')[0], 'DALL']), WaveType.ANALOG: lambda x: x}
results = {}
sources = ['CH' + str(channel)]
for source in sources:
if source.split('_')[0] not in results:
<DeepExtract>
if source[0:4] == 'MATH':
wave_type = WaveType.MATH
elif '_' in source:
wave_type = WaveType.DIGITAL
else:
wave_type = WaveType.ANALOG
wave_type = wave_type
</DeepExtract>
channel = channel_table[wave_type](source)
(encoding, bit_nr, datatype) = encoding_table[wave_type]
rec_len = int(self._intf.query('horizontal:recordlength?').strip())
results[source.split('_')[0]] = JobParameters(wave_type, channel, encoding, bit_nr, datatype, rec_len)
return results
|
def _make_jobs(self, channel=1):
"""Scan the instrument for available sources of data construct a list of jobs"""
encoding_table = {WaveType.MATH: ('FPBinary', 16, 'f'), WaveType.DIGITAL: ('RIBinary', 16, 'h'), WaveType.ANALOG: ('RIBinary', 16, 'h')}
channel_table = {WaveType.MATH: lambda x: x, WaveType.DIGITAL: lambda x: '_'.join([x.split('_')[0], 'DALL']), WaveType.ANALOG: lambda x: x}
results = {}
sources = ['CH' + str(channel)]
for source in sources:
if source.split('_')[0] not in results:
if source[0:4] == 'MATH':
wave_type = WaveType.MATH
elif '_' in source:
wave_type = WaveType.DIGITAL
else:
wave_type = WaveType.ANALOG
wave_type = wave_type
channel = channel_table[wave_type](source)
(encoding, bit_nr, datatype) = encoding_table[wave_type]
rec_len = int(self._intf.query('horizontal:recordlength?').strip())
results[source.split('_')[0]] = JobParameters(wave_type, channel, encoding, bit_nr, datatype, rec_len)
return results
|
basil
|
positive
|
def bag_attention(x, scope, query, rel_tot, is_training, var_scope=None, dropout_before=False, keep_prob=1.0):
with tf.variable_scope(var_scope or 'attention', reuse=tf.AUTO_REUSE):
if is_training:
if dropout_before:
<DeepExtract>
x = tf.contrib.layers.dropout(x, keep_prob=keep_prob)
</DeepExtract>
bag_repre = []
<DeepExtract>
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
current_relation = tf.nn.embedding_lookup(relation_matrix, query)
attention_logit = tf.reduce_sum(current_relation * x, -1)
attention_logit = attention_logit
</DeepExtract>
for i in range(scope.shape[0]):
bag_hidden_mat = x[scope[i][0]:scope[i][1]]
attention_score = tf.nn.softmax(attention_logit[scope[i][0]:scope[i][1]], -1)
bag_repre.append(tf.squeeze(tf.matmul(tf.expand_dims(attention_score, 0), bag_hidden_mat)))
bag_repre = tf.stack(bag_repre)
if not dropout_before:
<DeepExtract>
bag_repre = tf.contrib.layers.dropout(bag_repre, keep_prob=keep_prob)
</DeepExtract>
return (__logit__(bag_repre, rel_tot), bag_repre)
else:
<DeepExtract>
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
attention_logit = tf.matmul(x, tf.transpose(relation_matrix))
attention_logit = attention_logit
</DeepExtract>
bag_repre = []
bag_logit = []
for i in range(scope.shape[0]):
bag_hidden_mat = x[scope[i][0]:scope[i][1]]
attention_score = tf.nn.softmax(tf.transpose(attention_logit[scope[i][0]:scope[i][1], :]), -1)
bag_repre_for_each_rel = tf.matmul(attention_score, bag_hidden_mat)
<DeepExtract>
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, bag_repre_for_each_rel.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
logit = tf.matmul(bag_repre_for_each_rel, tf.transpose(relation_matrix)) + bias
bag_logit_for_each_rel = logit
</DeepExtract>
bag_repre.append(bag_repre_for_each_rel)
bag_logit.append(tf.diag_part(tf.nn.softmax(bag_logit_for_each_rel, -1)))
bag_repre = tf.stack(bag_repre)
bag_logit = tf.stack(bag_logit)
return (bag_logit, bag_repre)
|
def bag_attention(x, scope, query, rel_tot, is_training, var_scope=None, dropout_before=False, keep_prob=1.0):
with tf.variable_scope(var_scope or 'attention', reuse=tf.AUTO_REUSE):
if is_training:
if dropout_before:
x = tf.contrib.layers.dropout(x, keep_prob=keep_prob)
bag_repre = []
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
current_relation = tf.nn.embedding_lookup(relation_matrix, query)
attention_logit = tf.reduce_sum(current_relation * x, -1)
attention_logit = attention_logit
for i in range(scope.shape[0]):
bag_hidden_mat = x[scope[i][0]:scope[i][1]]
attention_score = tf.nn.softmax(attention_logit[scope[i][0]:scope[i][1]], -1)
bag_repre.append(tf.squeeze(tf.matmul(tf.expand_dims(attention_score, 0), bag_hidden_mat)))
bag_repre = tf.stack(bag_repre)
if not dropout_before:
bag_repre = tf.contrib.layers.dropout(bag_repre, keep_prob=keep_prob)
return (__logit__(bag_repre, rel_tot), bag_repre)
else:
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
attention_logit = tf.matmul(x, tf.transpose(relation_matrix))
attention_logit = attention_logit
bag_repre = []
bag_logit = []
for i in range(scope.shape[0]):
bag_hidden_mat = x[scope[i][0]:scope[i][1]]
attention_score = tf.nn.softmax(tf.transpose(attention_logit[scope[i][0]:scope[i][1], :]), -1)
bag_repre_for_each_rel = tf.matmul(attention_score, bag_hidden_mat)
with tf.variable_scope(var_scope or 'logit', reuse=tf.AUTO_REUSE):
relation_matrix = tf.get_variable('relation_matrix', shape=[rel_tot, bag_repre_for_each_rel.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
bias = tf.get_variable('bias', shape=[rel_tot], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
logit = tf.matmul(bag_repre_for_each_rel, tf.transpose(relation_matrix)) + bias
bag_logit_for_each_rel = logit
bag_repre.append(bag_repre_for_each_rel)
bag_logit.append(tf.diag_part(tf.nn.softmax(bag_logit_for_each_rel, -1)))
bag_repre = tf.stack(bag_repre)
bag_logit = tf.stack(bag_logit)
return (bag_logit, bag_repre)
|
CPL
|
positive
|
def _parse_sample(text: str) -> Sample:
try:
(label_start, label_end) = (text.index('{'), text.rindex('}'))
name = text[:label_start].strip()
label = text[label_start + 1:label_end]
<DeepExtract>
text[label_end + 1:] = text[label_end + 1:].lstrip()
separator = ' '
if separator not in text[label_end + 1:]:
separator = '\t'
values = [value.strip() for value in text[label_end + 1:].split(separator) if value.strip()]
if not values:
(value, timestamp) = (float(text[label_end + 1:]), None)
value = float(values[0])
timestamp = float(values[-1]) / 1000 if len(values) > 1 else None
(value, timestamp) = (value, timestamp)
</DeepExtract>
return Sample(name, _parse_labels(label), value, timestamp)
except ValueError:
separator = ' '
if separator not in text:
separator = '\t'
name_end = text.index(separator)
name = text[:name_end]
<DeepExtract>
text[name_end:] = text[name_end:].lstrip()
separator = ' '
if separator not in text[name_end:]:
separator = '\t'
values = [value.strip() for value in text[name_end:].split(separator) if value.strip()]
if not values:
(value, timestamp) = (float(text[name_end:]), None)
value = float(values[0])
timestamp = float(values[-1]) / 1000 if len(values) > 1 else None
(value, timestamp) = (value, timestamp)
</DeepExtract>
return Sample(name, {}, value, timestamp)
|
def _parse_sample(text: str) -> Sample:
try:
(label_start, label_end) = (text.index('{'), text.rindex('}'))
name = text[:label_start].strip()
label = text[label_start + 1:label_end]
text[label_end + 1:] = text[label_end + 1:].lstrip()
separator = ' '
if separator not in text[label_end + 1:]:
separator = '\t'
values = [value.strip() for value in text[label_end + 1:].split(separator) if value.strip()]
if not values:
(value, timestamp) = (float(text[label_end + 1:]), None)
value = float(values[0])
timestamp = float(values[-1]) / 1000 if len(values) > 1 else None
(value, timestamp) = (value, timestamp)
return Sample(name, _parse_labels(label), value, timestamp)
except ValueError:
separator = ' '
if separator not in text:
separator = '\t'
name_end = text.index(separator)
name = text[:name_end]
text[name_end:] = text[name_end:].lstrip()
separator = ' '
if separator not in text[name_end:]:
separator = '\t'
values = [value.strip() for value in text[name_end:].split(separator) if value.strip()]
if not values:
(value, timestamp) = (float(text[name_end:]), None)
value = float(values[0])
timestamp = float(values[-1]) / 1000 if len(values) > 1 else None
(value, timestamp) = (value, timestamp)
return Sample(name, {}, value, timestamp)
|
client_python
|
positive
|
def index_titles(since=None):
"""index all the titles and holdings that are modeled in the database
if you pass in a datetime object as the since parameter only title
records that have been created since that time will be indexed.
"""
solr = SolrConnection(settings.SOLR)
titles = models.Title.objects.all()
if since:
titles = titles.filter(created__gte=since)
titles = titles.prefetch_related('languages', 'alt_titles', 'subjects', 'notes', 'places', 'urls', 'essays', 'country', 'holdings')
count = 0
for chunk in sliced(titles, 500):
docs = []
for title in chunk:
try:
docs.append(title.solr_doc)
except Exception:
LOGGER.exception('Unable to index title %s', title)
solr.add_many(docs)
reset_queries()
solr.commit()
count += len(chunk)
LOGGER.info('indexed %d titles', count)
lccns = set(models.Title.objects.values_list('lccn', flat=True))
for result in solr.query('+type:title', fields=['id', 'lccn']):
stale_id = result['id']
lccn = result['lccn']
if lccn not in lccns:
LOGGER.warning('Removing stale title %s from the search index', stale_id)
<DeepExtract>
if not solr:
solr = SolrConnection(settings.SOLR)
if isinstance(stale_id, models.Title):
title_id = stale_id.url
else:
title_id = stale_id
q = '+type:title +id:%s' % title_id
solr.delete_query(q)
LOGGER.info('deleted title %s from the index', stale_id)
</DeepExtract>
solr.commit()
|
def index_titles(since=None):
"""index all the titles and holdings that are modeled in the database
if you pass in a datetime object as the since parameter only title
records that have been created since that time will be indexed.
"""
solr = SolrConnection(settings.SOLR)
titles = models.Title.objects.all()
if since:
titles = titles.filter(created__gte=since)
titles = titles.prefetch_related('languages', 'alt_titles', 'subjects', 'notes', 'places', 'urls', 'essays', 'country', 'holdings')
count = 0
for chunk in sliced(titles, 500):
docs = []
for title in chunk:
try:
docs.append(title.solr_doc)
except Exception:
LOGGER.exception('Unable to index title %s', title)
solr.add_many(docs)
reset_queries()
solr.commit()
count += len(chunk)
LOGGER.info('indexed %d titles', count)
lccns = set(models.Title.objects.values_list('lccn', flat=True))
for result in solr.query('+type:title', fields=['id', 'lccn']):
stale_id = result['id']
lccn = result['lccn']
if lccn not in lccns:
LOGGER.warning('Removing stale title %s from the search index', stale_id)
if not solr:
solr = SolrConnection(settings.SOLR)
if isinstance(stale_id, models.Title):
title_id = stale_id.url
else:
title_id = stale_id
q = '+type:title +id:%s' % title_id
solr.delete_query(q)
LOGGER.info('deleted title %s from the index', stale_id)
solr.commit()
|
chronam
|
positive
|
def step(self, batch):
<DeepExtract>
if len(self.required_D_index) == 0:
losses_d = {}
with torch.no_grad():
fake_data = self.generator.forward_train(**batch)
real_scores = self.discriminator(**batch, with_pose=True)
fake_scores = {}
for idx in self.required_D_index:
fake_scores[idx] = self.discriminator.forward_fake(**batch, with_pose=True, fake_img=fake_data[idx])
batch = {key: item for (key, item) in batch.items()}
batch['fake_data'] = fake_data
batch['real_scores'] = real_scores
batch['fake_scores'] = fake_scores
loss_funcs = [c.d_loss for c in self.criterions_D]
log = self._backward(batch, loss_funcs, self.discriminator, self.d_optimizer, id_offset=0)
for i in range(len(real_scores)):
log[f'real_score{i}'] = real_scores[i].mean().detach()
for (_, fake_score) in fake_scores.items():
log[f'fake_score{i}'] = fake_score.mean().detach()
losses_d = log
</DeepExtract>
<DeepExtract>
for p in self.discriminator.parameters():
p.requires_grad = False
fake_data = self.generator.forward_train(**batch)
fake_scores = {}
for idx in self.required_D_index:
fake_scores[idx] = self.discriminator.forward_fake(**batch, fake_img=fake_data[idx])
batch = {key: item for (key, item) in batch.items()}
batch['fake_data'] = fake_data
batch['fake_scores'] = fake_scores
self.mask = batch['mask']
if any((c.NEED_REAL_SCORE_GENERATOR for c in self.criterions_G)):
real_scores = self.discriminator(**batch)
batch['real_scores'] = real_scores
loss_funcs = [c.g_loss for c in self.criterions_G]
log = self._backward(batch, loss_funcs, self.generator, self.g_optimizer, id_offset=len(self.criterions_D))
del self.mask
for p in self.discriminator.parameters():
p.requires_grad = True
losses_g = log
</DeepExtract>
if losses_d is None or losses_g is None:
return None
self.it += 1
losses = {**losses_d, **losses_g}
return losses
|
def step(self, batch):
if len(self.required_D_index) == 0:
losses_d = {}
with torch.no_grad():
fake_data = self.generator.forward_train(**batch)
real_scores = self.discriminator(**batch, with_pose=True)
fake_scores = {}
for idx in self.required_D_index:
fake_scores[idx] = self.discriminator.forward_fake(**batch, with_pose=True, fake_img=fake_data[idx])
batch = {key: item for (key, item) in batch.items()}
batch['fake_data'] = fake_data
batch['real_scores'] = real_scores
batch['fake_scores'] = fake_scores
loss_funcs = [c.d_loss for c in self.criterions_D]
log = self._backward(batch, loss_funcs, self.discriminator, self.d_optimizer, id_offset=0)
for i in range(len(real_scores)):
log[f'real_score{i}'] = real_scores[i].mean().detach()
for (_, fake_score) in fake_scores.items():
log[f'fake_score{i}'] = fake_score.mean().detach()
losses_d = log
for p in self.discriminator.parameters():
p.requires_grad = False
fake_data = self.generator.forward_train(**batch)
fake_scores = {}
for idx in self.required_D_index:
fake_scores[idx] = self.discriminator.forward_fake(**batch, fake_img=fake_data[idx])
batch = {key: item for (key, item) in batch.items()}
batch['fake_data'] = fake_data
batch['fake_scores'] = fake_scores
self.mask = batch['mask']
if any((c.NEED_REAL_SCORE_GENERATOR for c in self.criterions_G)):
real_scores = self.discriminator(**batch)
batch['real_scores'] = real_scores
loss_funcs = [c.g_loss for c in self.criterions_G]
log = self._backward(batch, loss_funcs, self.generator, self.g_optimizer, id_offset=len(self.criterions_D))
del self.mask
for p in self.discriminator.parameters():
p.requires_grad = True
losses_g = log
if losses_d is None or losses_g is None:
return None
self.it += 1
losses = {**losses_d, **losses_g}
return losses
|
DeepPrivacy
|
positive
|
@patch('enot.global_properties.GlobalProperties.conf_dir', new_callable=PropertyMock)
@patch('enot.global_properties.ensure_conf_file')
@patch.object(EnotCache, 'get_versions', return_value=['1.0.0'])
def test_list_installed(self, _, mock_conf, mock_conf_dir):
mock_conf.return_value = self.conf_file
mock_conf_dir.return_value = join(self.test_dir, 'conf')
apps = ['test_app', 'test_app1', 'test_app2']
for name in apps:
create(self.test_dir, {'<name>': name})
pack_path = join(self.test_dir, name)
set_git_url(pack_path, 'http://github/comtihon/' + name)
set_git_tag(pack_path, '1.0.0')
test_install_dir = join(self.test_dir, 'test_install_' + name)
modify_config(pack_path, {'install': [{'shell': 'mkdir ' + test_install_dir}, {'shell': 'cp ebin/*.* ' + test_install_dir}]})
<DeepExtract>
builder = Builder.init_from_path(pack_path)
builder.populate()
self.assertEqual(True, builder.build())
controller = Controller()
self.assertEqual(True, controller.local_cache.add_package(builder.project))
return builder
</DeepExtract>
self.assertEqual(True, Controller().install('comtihon/' + name, None))
installed_expected = [{'name': 'comtihon/' + app, 'vsn': '1.0.0'} for app in apps]
self.assertEqual(installed_expected, Controller().installed())
|
@patch('enot.global_properties.GlobalProperties.conf_dir', new_callable=PropertyMock)
@patch('enot.global_properties.ensure_conf_file')
@patch.object(EnotCache, 'get_versions', return_value=['1.0.0'])
def test_list_installed(self, _, mock_conf, mock_conf_dir):
mock_conf.return_value = self.conf_file
mock_conf_dir.return_value = join(self.test_dir, 'conf')
apps = ['test_app', 'test_app1', 'test_app2']
for name in apps:
create(self.test_dir, {'<name>': name})
pack_path = join(self.test_dir, name)
set_git_url(pack_path, 'http://github/comtihon/' + name)
set_git_tag(pack_path, '1.0.0')
test_install_dir = join(self.test_dir, 'test_install_' + name)
modify_config(pack_path, {'install': [{'shell': 'mkdir ' + test_install_dir}, {'shell': 'cp ebin/*.* ' + test_install_dir}]})
builder = Builder.init_from_path(pack_path)
builder.populate()
self.assertEqual(True, builder.build())
controller = Controller()
self.assertEqual(True, controller.local_cache.add_package(builder.project))
return builder
self.assertEqual(True, Controller().install('comtihon/' + name, None))
installed_expected = [{'name': 'comtihon/' + app, 'vsn': '1.0.0'} for app in apps]
self.assertEqual(installed_expected, Controller().installed())
|
enot
|
positive
|
def stop_units(self, units):
""" fails if any unit fails to stop """
<DeepExtract>
target = target or SysInitTarget
for attempt in xrange(int(SysInitWait)):
state = self.is_system_running()
if 'init' in state:
if target in ['sysinit.target', 'basic.target']:
logg.debug('system not initialized - wait %s', target)
time.sleep(1)
continue
if 'start' in state or 'stop' in state:
if target in ['basic.target']:
logg.debug('system not running - wait %s', target)
time.sleep(1)
continue
if 'running' not in state:
logg.debug('system is %s', state)
break
</DeepExtract>
done = True
for unit in self.sortedBefore(units):
if not self.stop_unit(unit):
done = False
return done
|
def stop_units(self, units):
""" fails if any unit fails to stop """
target = target or SysInitTarget
for attempt in xrange(int(SysInitWait)):
state = self.is_system_running()
if 'init' in state:
if target in ['sysinit.target', 'basic.target']:
logg.debug('system not initialized - wait %s', target)
time.sleep(1)
continue
if 'start' in state or 'stop' in state:
if target in ['basic.target']:
logg.debug('system not running - wait %s', target)
time.sleep(1)
continue
if 'running' not in state:
logg.debug('system is %s', state)
break
done = True
for unit in self.sortedBefore(units):
if not self.stop_unit(unit):
done = False
return done
|
deployment
|
positive
|
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
<DeepExtract>
ndim = voxel_minmax.shape[-1] // 2
center = voxel_minmax[..., :ndim]
dims = voxel_minmax[..., ndim:] - center
voxel_corners = center_to_corner_box3d(center, dims, origin=0.0)
</DeepExtract>
<DeepExtract>
corners = corners_nd(gt_boxes[:, 3:6], origin=[0.5, 0.5, 0.5])
if gt_boxes[:, 6] is not None:
corners = rotation_3d_in_axis(corners, gt_boxes[:, 6], axis=2)
corners += gt_boxes[:, :3].reshape([-1, 1, 3])
gt_box_corners = corners
</DeepExtract>
<DeepExtract>
surfaces = np.array([[gt_box_corners[:, 0], gt_box_corners[:, 1], gt_box_corners[:, 2], gt_box_corners[:, 3]], [gt_box_corners[:, 7], gt_box_corners[:, 6], gt_box_corners[:, 5], gt_box_corners[:, 4]], [gt_box_corners[:, 0], gt_box_corners[:, 3], gt_box_corners[:, 7], gt_box_corners[:, 4]], [gt_box_corners[:, 1], gt_box_corners[:, 5], gt_box_corners[:, 6], gt_box_corners[:, 2]], [gt_box_corners[:, 0], gt_box_corners[:, 4], gt_box_corners[:, 5], gt_box_corners[:, 1]], [gt_box_corners[:, 3], gt_box_corners[:, 2], gt_box_corners[:, 6], gt_box_corners[:, 7]]]).transpose([2, 0, 1, 3])
gt_surfaces = surfaces
</DeepExtract>
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
|
def assign_label_to_voxel_v3(gt_boxes, coors, voxel_size, coors_range):
"""assign a 0/1 label to each voxel based on whether
the center of voxel is in gt_box. LIDAR.
"""
voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)
coors_range = np.array(coors_range, dtype=gt_boxes.dtype)
shift = coors_range[:3]
voxel_origins = coors[:, ::-1] * voxel_size + shift
voxel_maxes = voxel_origins + voxel_size
voxel_minmax = np.concatenate([voxel_origins, voxel_maxes], axis=-1)
ndim = voxel_minmax.shape[-1] // 2
center = voxel_minmax[..., :ndim]
dims = voxel_minmax[..., ndim:] - center
voxel_corners = center_to_corner_box3d(center, dims, origin=0.0)
corners = corners_nd(gt_boxes[:, 3:6], origin=[0.5, 0.5, 0.5])
if gt_boxes[:, 6] is not None:
corners = rotation_3d_in_axis(corners, gt_boxes[:, 6], axis=2)
corners += gt_boxes[:, :3].reshape([-1, 1, 3])
gt_box_corners = corners
surfaces = np.array([[gt_box_corners[:, 0], gt_box_corners[:, 1], gt_box_corners[:, 2], gt_box_corners[:, 3]], [gt_box_corners[:, 7], gt_box_corners[:, 6], gt_box_corners[:, 5], gt_box_corners[:, 4]], [gt_box_corners[:, 0], gt_box_corners[:, 3], gt_box_corners[:, 7], gt_box_corners[:, 4]], [gt_box_corners[:, 1], gt_box_corners[:, 5], gt_box_corners[:, 6], gt_box_corners[:, 2]], [gt_box_corners[:, 0], gt_box_corners[:, 4], gt_box_corners[:, 5], gt_box_corners[:, 1]], [gt_box_corners[:, 3], gt_box_corners[:, 2], gt_box_corners[:, 6], gt_box_corners[:, 7]]]).transpose([2, 0, 1, 3])
gt_surfaces = surfaces
voxel_corners_flat = voxel_corners.reshape([-1, 3])
ret = points_in_convex_polygon_3d_jit(voxel_corners_flat, gt_surfaces)
ret = ret.reshape([-1, 8, ret.shape[-1]])
return ret.any(-1).any(-1).astype(np.int64)
|
CenterPoint
|
positive
|
def forward(self, x):
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
fea1_1 = self.lrelu(self.conv0_0(x1))
fea1_2 = self.lrelu(self.conv0_0(x2))
fea = torch.cat([fea1_1, fea1_2], dim=1)
fea = self.lrelu(self.bn0_1(self.conv0_1_2ims(fea)))
<DeepExtract>
fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))
fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))
fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))
fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))
fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))
fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))
fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))
fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))
fea = self.pool(fea)
fea = fea.view(fea.size(0), -1)
fea = self.lrelu(self.linear1(fea))
out = self.linear2(fea)
out = out
</DeepExtract>
return out
|
def forward(self, x):
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
fea1_1 = self.lrelu(self.conv0_0(x1))
fea1_2 = self.lrelu(self.conv0_0(x2))
fea = torch.cat([fea1_1, fea1_2], dim=1)
fea = self.lrelu(self.bn0_1(self.conv0_1_2ims(fea)))
fea = self.lrelu(self.bn1_0(self.conv1_0(fea)))
fea = self.lrelu(self.bn1_1(self.conv1_1(fea)))
fea = self.lrelu(self.bn2_0(self.conv2_0(fea)))
fea = self.lrelu(self.bn2_1(self.conv2_1(fea)))
fea = self.lrelu(self.bn3_0(self.conv3_0(fea)))
fea = self.lrelu(self.bn3_1(self.conv3_1(fea)))
fea = self.lrelu(self.bn4_0(self.conv4_0(fea)))
fea = self.lrelu(self.bn4_1(self.conv4_1(fea)))
fea = self.pool(fea)
fea = fea.view(fea.size(0), -1)
fea = self.lrelu(self.linear1(fea))
out = self.linear2(fea)
out = out
return out
|
deep-landscape
|
positive
|
def attach_image_to_event(event_content: str) -> str:
"""Get a link to the suitable image of a given token content.
Args:
event_content (str): The event content.
Returns:
str: The link to the suitable image of a given token content.
"""
event_tokens = word_tokenize(event_content)
for token in event_tokens:
if token.isalnum():
try:
base_word = lemmatize(remove_non_alphabet_chars(token).lower())
except ValueError:
base_word = token
if base_word in IMAGES_RELATED_WORDS_MAP.values():
return generate_flare_link_from_lemmatized_word(base_word)
<DeepExtract>
key = get_image_name(token)
if key:
link = generate_flare_link_from_lemmatized_word(key)
</DeepExtract>
if link:
return link
link = '#'
return link
|
def attach_image_to_event(event_content: str) -> str:
"""Get a link to the suitable image of a given token content.
Args:
event_content (str): The event content.
Returns:
str: The link to the suitable image of a given token content.
"""
event_tokens = word_tokenize(event_content)
for token in event_tokens:
if token.isalnum():
try:
base_word = lemmatize(remove_non_alphabet_chars(token).lower())
except ValueError:
base_word = token
if base_word in IMAGES_RELATED_WORDS_MAP.values():
return generate_flare_link_from_lemmatized_word(base_word)
key = get_image_name(token)
if key:
link = generate_flare_link_from_lemmatized_word(key)
if link:
return link
link = '#'
return link
|
calendar
|
positive
|
def rnd_data(self, buf_size, full_buf_size):
<DeepExtract>
if full_buf_size >= self.server_info.buffer_size:
data_len = 0
tcp_mss = self.server_info.tcp_mss
rev_len = tcp_mss - buf_size - 9
if rev_len == 0:
data_len = 0
if rev_len < 0:
if rev_len > -tcp_mss:
data_len = self.trapezoid_random_int(rev_len + tcp_mss, -0.3)
data_len = common.ord(os.urandom(1)[0]) % 32
if buf_size > 900:
data_len = struct.unpack('>H', os.urandom(2))[0] % rev_len
data_len = self.trapezoid_random_int(rev_len, -0.3)
</DeepExtract>
if data_len < 128:
return common.chr(data_len + 1) + os.urandom(data_len)
return common.chr(255) + struct.pack('<H', data_len + 1) + os.urandom(data_len - 2)
|
def rnd_data(self, buf_size, full_buf_size):
if full_buf_size >= self.server_info.buffer_size:
data_len = 0
tcp_mss = self.server_info.tcp_mss
rev_len = tcp_mss - buf_size - 9
if rev_len == 0:
data_len = 0
if rev_len < 0:
if rev_len > -tcp_mss:
data_len = self.trapezoid_random_int(rev_len + tcp_mss, -0.3)
data_len = common.ord(os.urandom(1)[0]) % 32
if buf_size > 900:
data_len = struct.unpack('>H', os.urandom(2))[0] % rev_len
data_len = self.trapezoid_random_int(rev_len, -0.3)
if data_len < 128:
return common.chr(data_len + 1) + os.urandom(data_len)
return common.chr(255) + struct.pack('<H', data_len + 1) + os.urandom(data_len - 2)
|
Dockerfiles
|
positive
|
def __init__(self, dtype=np.complex64, vlen=1, raster_length=10000, select_start=0, select_length=None, nagg=1, agg_op='take', agg_op_args=(0,), max_raster_length=None, max_select_length=None, max_nagg=None):
"""Select data from a periodic raster window and optionally aggregate.
The input data is provided as samples with length `vlen` and type
`dtype`. It is then divided into raster windows with a number of
samples equal to `raster_length`. Within and relative to each raster
window, samples are selected to be output using `select_start` and
`select_length`. The output rasters can optionally be aggregated
together from `nagg` outputs to one using the specified operation.
The advantage of a raster of data is that its size can be changed in
a running flowgraph.
Parameters
----------
dtype : np.dtype
Data type of the input and output data.
vlen : int
Vector length of the *input* data (NOT the output vector length).
raster_length : int
Length of the raster window.
select_start : int
Index relative to the start of the raster window that indicates the
start of the output raster.
select_length : int
Number of samples to include in the selection from the raster
window. The equivalent indexing of the raster window would then be
``raster[select_start:(select_start + select_length)]``. If None,
then the length of entire remaining raster window from
`select_start` will be used.
nagg : int
Number of output rasters to aggregate together. The output is thus
downsampled by `nagg` in whole chunks of the selected raster
window.
agg_op : str
String giving the name of a numpy array method to use for the
aggregation operation. For `nagg` output rasters organized as an
``(nagg, select_length, vlen)``-shaped array called ``selections``,
the aggregation operation would then be
``selections.agg_op(*agg_op_args, axis=0)``.
agg_op_args : tuple
Positional arguments to be passed to the aggregation operation
method specified by `agg_op`. See above.
Other Parameters
----------------
max_raster_length : int
Maximum possible raster length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`raster_length` will be used.
max_select_length : int
Maximum possible selection length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`select_length` will be used.
max_nagg : int
Maximum possible output aggregation, to allow for changes while the
block is running. Knowing the maximum aggregation size allows for
allocation of appropriately-sized buffers. If None, a default of
four times the initial `nagg` will be used.
"""
if max_raster_length is None:
max_raster_length = 4 * raster_length
if max_select_length is None:
length = raster_length if select_length is None else select_length
max_select_length = 4 * length
if max_nagg is None:
max_nagg = 4 * nagg
gr.basic_block.__init__(self, name='Raster Select', in_sig=[(dtype, vlen)], out_sig=[(dtype, vlen)])
self._dtype = dtype
self._vlen = vlen
self._max_raster_length = max_raster_length
self._max_select_length = max_select_length
self._max_nagg = max_nagg
<DeepExtract>
self._agg_op = agg_op
</DeepExtract>
<DeepExtract>
self._agg_op_args = agg_op_args
</DeepExtract>
<DeepExtract>
self._next_raster_length = max_raster_length
self._params_set = False
</DeepExtract>
<DeepExtract>
self._next_select_start = 0
self._params_set = False
</DeepExtract>
<DeepExtract>
self._next_select_length = max_select_length
self._params_set = False
self._rate_set = False
</DeepExtract>
<DeepExtract>
self._next_nagg = max_nagg
self._params_set = False
</DeepExtract>
<DeepExtract>
if not self._params_set:
self._set_params()
return True
else:
return False
</DeepExtract>
<DeepExtract>
self._next_raster_length = raster_length
self._params_set = False
</DeepExtract>
<DeepExtract>
self._next_select_start = select_start
self._params_set = False
</DeepExtract>
<DeepExtract>
self._next_select_length = select_length
self._params_set = False
self._rate_set = False
</DeepExtract>
<DeepExtract>
self._next_nagg = nagg
self._params_set = False
</DeepExtract>
self.set_tag_propagation_policy(gr.TPP_DONT)
|
def __init__(self, dtype=np.complex64, vlen=1, raster_length=10000, select_start=0, select_length=None, nagg=1, agg_op='take', agg_op_args=(0,), max_raster_length=None, max_select_length=None, max_nagg=None):
"""Select data from a periodic raster window and optionally aggregate.
The input data is provided as samples with length `vlen` and type
`dtype`. It is then divided into raster windows with a number of
samples equal to `raster_length`. Within and relative to each raster
window, samples are selected to be output using `select_start` and
`select_length`. The output rasters can optionally be aggregated
together from `nagg` outputs to one using the specified operation.
The advantage of a raster of data is that its size can be changed in
a running flowgraph.
Parameters
----------
dtype : np.dtype
Data type of the input and output data.
vlen : int
Vector length of the *input* data (NOT the output vector length).
raster_length : int
Length of the raster window.
select_start : int
Index relative to the start of the raster window that indicates the
start of the output raster.
select_length : int
Number of samples to include in the selection from the raster
window. The equivalent indexing of the raster window would then be
``raster[select_start:(select_start + select_length)]``. If None,
then the length of entire remaining raster window from
`select_start` will be used.
nagg : int
Number of output rasters to aggregate together. The output is thus
downsampled by `nagg` in whole chunks of the selected raster
window.
agg_op : str
String giving the name of a numpy array method to use for the
aggregation operation. For `nagg` output rasters organized as an
``(nagg, select_length, vlen)``-shaped array called ``selections``,
the aggregation operation would then be
``selections.agg_op(*agg_op_args, axis=0)``.
agg_op_args : tuple
Positional arguments to be passed to the aggregation operation
method specified by `agg_op`. See above.
Other Parameters
----------------
max_raster_length : int
Maximum possible raster length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`raster_length` will be used.
max_select_length : int
Maximum possible selection length, to allow for changes while the
block is running. Knowing the maximum length allows for allocation
of appropriately-sized buffers. If None, four times the initial
`select_length` will be used.
max_nagg : int
Maximum possible output aggregation, to allow for changes while the
block is running. Knowing the maximum aggregation size allows for
allocation of appropriately-sized buffers. If None, a default of
four times the initial `nagg` will be used.
"""
if max_raster_length is None:
max_raster_length = 4 * raster_length
if max_select_length is None:
length = raster_length if select_length is None else select_length
max_select_length = 4 * length
if max_nagg is None:
max_nagg = 4 * nagg
gr.basic_block.__init__(self, name='Raster Select', in_sig=[(dtype, vlen)], out_sig=[(dtype, vlen)])
self._dtype = dtype
self._vlen = vlen
self._max_raster_length = max_raster_length
self._max_select_length = max_select_length
self._max_nagg = max_nagg
self._agg_op = agg_op
self._agg_op_args = agg_op_args
self._next_raster_length = max_raster_length
self._params_set = False
self._next_select_start = 0
self._params_set = False
self._next_select_length = max_select_length
self._params_set = False
self._rate_set = False
self._next_nagg = max_nagg
self._params_set = False
if not self._params_set:
self._set_params()
return True
else:
return False
self._next_raster_length = raster_length
self._params_set = False
self._next_select_start = select_start
self._params_set = False
self._next_select_length = select_length
self._params_set = False
self._rate_set = False
self._next_nagg = nagg
self._params_set = False
self.set_tag_propagation_policy(gr.TPP_DONT)
|
digital_rf
|
positive
|
def test_real_iso_latin_document(self):
unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacré bleu!</p></body></html>'
iso_latin_html = unicode_html.encode('iso-8859-1')
<DeepExtract>
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(iso_latin_html, builder=builder, **kwargs)
</DeepExtract>
result = soup.encode('utf-8')
expected = unicode_html.replace('ISO-Latin-1', 'utf-8')
expected = expected.encode('utf-8')
self.assertEqual(result, expected)
|
def test_real_iso_latin_document(self):
unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacré bleu!</p></body></html>'
iso_latin_html = unicode_html.encode('iso-8859-1')
builder = kwargs.pop('builder', self.default_builder)
soup = BeautifulSoup(iso_latin_html, builder=builder, **kwargs)
result = soup.encode('utf-8')
expected = unicode_html.replace('ISO-Latin-1', 'utf-8')
expected = expected.encode('utf-8')
self.assertEqual(result, expected)
|
BeautifulSoup4
|
positive
|
def get_portfolio(self):
<DeepExtract>
for perf_period in self.perf_periods:
perf_period.calculate_performance()
</DeepExtract>
return self.cumulative_performance.as_portfolio()
|
def get_portfolio(self):
for perf_period in self.perf_periods:
perf_period.calculate_performance()
return self.cumulative_performance.as_portfolio()
|
AlephNull
|
positive
|
def logs_bloom(logs: Tuple[Log, ...]) -> Bloom:
"""
Obtain the logs bloom from a list of log entries.
The address and each topic of a log are added to the bloom filter.
Parameters
----------
logs :
List of logs for which the logs bloom is to be obtained.
Returns
-------
logs_bloom : `Bloom`
The logs bloom obtained which is 256 bytes with some bits set as per
the caller address and the log topics.
"""
bloom: bytearray = bytearray(b'\x00' * 256)
for log in logs:
<DeepExtract>
hash = keccak256(log.address)
for idx in (0, 2, 4):
bit_to_set = Uint.from_be_bytes(hash[idx:idx + 2]) & 2047
bit_index = 2047 - bit_to_set
byte_index = bit_index // 8
bit_value = 1 << 7 - bit_index % 8
bloom[byte_index] = bloom[byte_index] | bit_value
</DeepExtract>
for topic in log.topics:
<DeepExtract>
hash = keccak256(topic)
for idx in (0, 2, 4):
bit_to_set = Uint.from_be_bytes(hash[idx:idx + 2]) & 2047
bit_index = 2047 - bit_to_set
byte_index = bit_index // 8
bit_value = 1 << 7 - bit_index % 8
bloom[byte_index] = bloom[byte_index] | bit_value
</DeepExtract>
return Bloom(bloom)
|
def logs_bloom(logs: Tuple[Log, ...]) -> Bloom:
"""
Obtain the logs bloom from a list of log entries.
The address and each topic of a log are added to the bloom filter.
Parameters
----------
logs :
List of logs for which the logs bloom is to be obtained.
Returns
-------
logs_bloom : `Bloom`
The logs bloom obtained which is 256 bytes with some bits set as per
the caller address and the log topics.
"""
bloom: bytearray = bytearray(b'\x00' * 256)
for log in logs:
hash = keccak256(log.address)
for idx in (0, 2, 4):
bit_to_set = Uint.from_be_bytes(hash[idx:idx + 2]) & 2047
bit_index = 2047 - bit_to_set
byte_index = bit_index // 8
bit_value = 1 << 7 - bit_index % 8
bloom[byte_index] = bloom[byte_index] | bit_value
for topic in log.topics:
hash = keccak256(topic)
for idx in (0, 2, 4):
bit_to_set = Uint.from_be_bytes(hash[idx:idx + 2]) & 2047
bit_index = 2047 - bit_to_set
byte_index = bit_index // 8
bit_value = 1 << 7 - bit_index % 8
bloom[byte_index] = bloom[byte_index] | bit_value
return Bloom(bloom)
|
eth1.0-specs
|
positive
|
def forward(self, C, Q, Cmask, Qmask):
<DeepExtract>
C = F.dropout(C, p=self.dropout, training=self.training)
Q = F.dropout(Q, p=self.dropout, training=self.training)
max_q_len = Q.size(-2)
max_context_len = C.size(-2)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, max_q_len])
subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, max_context_len, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
res = res * Qmask.unsqueeze(1) * Cmask.unsqueeze(-1)
S = res
</DeepExtract>
Cmask = Cmask.unsqueeze(-1)
Qmask = Qmask.unsqueeze(1)
<DeepExtract>
S = torch.clamp(S, min=-15.0, max=15.0)
if Qmask is not None:
Qmask = Qmask.float()
S = S * Qmask
e_x = torch.exp(S - torch.max(S, dim=2, keepdim=True)[0])
if Qmask is not None:
e_x = e_x * Qmask
softmax = e_x / (torch.sum(e_x, dim=2, keepdim=True) + 1e-06)
S1 = softmax
</DeepExtract>
<DeepExtract>
S = torch.clamp(S, min=-15.0, max=15.0)
if Cmask is not None:
Cmask = Cmask.float()
S = S * Cmask
e_x = torch.exp(S - torch.max(S, dim=1, keepdim=True)[0])
if Cmask is not None:
e_x = e_x * Cmask
softmax = e_x / (torch.sum(e_x, dim=1, keepdim=True) + 1e-06)
S2 = softmax
</DeepExtract>
S1 = S1 * Qmask * Cmask
S2 = S2 * Qmask * Cmask
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out
|
def forward(self, C, Q, Cmask, Qmask):
C = F.dropout(C, p=self.dropout, training=self.training)
Q = F.dropout(Q, p=self.dropout, training=self.training)
max_q_len = Q.size(-2)
max_context_len = C.size(-2)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, max_q_len])
subres1 = torch.matmul(Q, self.w4Q).transpose(1, 2).expand([-1, max_context_len, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
res = res * Qmask.unsqueeze(1) * Cmask.unsqueeze(-1)
S = res
Cmask = Cmask.unsqueeze(-1)
Qmask = Qmask.unsqueeze(1)
S = torch.clamp(S, min=-15.0, max=15.0)
if Qmask is not None:
Qmask = Qmask.float()
S = S * Qmask
e_x = torch.exp(S - torch.max(S, dim=2, keepdim=True)[0])
if Qmask is not None:
e_x = e_x * Qmask
softmax = e_x / (torch.sum(e_x, dim=2, keepdim=True) + 1e-06)
S1 = softmax
S = torch.clamp(S, min=-15.0, max=15.0)
if Cmask is not None:
Cmask = Cmask.float()
S = S * Cmask
e_x = torch.exp(S - torch.max(S, dim=1, keepdim=True)[0])
if Cmask is not None:
e_x = e_x * Cmask
softmax = e_x / (torch.sum(e_x, dim=1, keepdim=True) + 1e-06)
S2 = softmax
S1 = S1 * Qmask * Cmask
S2 = S2 * Qmask * Cmask
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out
|
alfworld
|
positive
|
def forward(self, mol_batch, x_tree_vecs):
(pred_hiddens, pred_contexts, pred_targets) = ([], [], [])
(stop_hiddens, stop_contexts, stop_targets) = ([], [], [])
traces = []
for mol_tree in mol_batch:
s = []
<DeepExtract>
for y in mol_tree.nodes[0].neighbors:
if y.idx == -1:
continue
s.append((mol_tree.nodes[0], y, 1))
dfs(s, y, mol_tree.nodes[0].idx)
s.append((y, mol_tree.nodes[0], 0))
</DeepExtract>
traces.append(s)
for node in mol_tree.nodes:
node.neighbors = []
batch_size = len(mol_batch)
pred_hiddens.append(create_var(torch.zeros(len(mol_batch), self.hidden_size)))
pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
pred_contexts.append(create_var(torch.LongTensor(range(batch_size))))
max_iter = max([len(tr) for tr in traces])
padding = create_var(torch.zeros(self.hidden_size), False)
h = {}
for t in range(max_iter):
prop_list = []
batch_list = []
for (i, plist) in enumerate(traces):
if t < len(plist):
prop_list.append(plist[t])
batch_list.append(i)
cur_x = []
(cur_h_nei, cur_o_nei) = ([], [])
for (node_x, real_y, _) in prop_list:
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors if node_y.idx != real_y.idx]
pad_len = MAX_NB - len(cur_nei)
cur_h_nei.extend(cur_nei)
cur_h_nei.extend([padding] * pad_len)
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors]
pad_len = MAX_NB - len(cur_nei)
cur_o_nei.extend(cur_nei)
cur_o_nei.extend([padding] * pad_len)
cur_x.append(node_x.wid)
cur_x = create_var(torch.LongTensor(cur_x))
cur_x = self.embedding(cur_x)
cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
cur_o = cur_o_nei.sum(dim=1)
(pred_target, pred_list) = ([], [])
stop_target = []
for (i, m) in enumerate(prop_list):
(node_x, node_y, direction) = m
(x, y) = (node_x.idx, node_y.idx)
h[x, y] = new_h[i]
node_y.neighbors.append(node_x)
if direction == 1:
pred_target.append(node_y.wid)
pred_list.append(i)
stop_target.append(direction)
cur_batch = create_var(torch.LongTensor(batch_list))
stop_hidden = torch.cat([cur_x, cur_o], dim=1)
stop_hiddens.append(stop_hidden)
stop_contexts.append(cur_batch)
stop_targets.extend(stop_target)
if len(pred_list) > 0:
batch_list = [batch_list[i] for i in pred_list]
cur_batch = create_var(torch.LongTensor(batch_list))
pred_contexts.append(cur_batch)
cur_pred = create_var(torch.LongTensor(pred_list))
pred_hiddens.append(new_h.index_select(0, cur_pred))
pred_targets.extend(pred_target)
(cur_x, cur_o_nei) = ([], [])
for mol_tree in mol_batch:
node_x = mol_tree.nodes[0]
cur_x.append(node_x.wid)
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors]
pad_len = MAX_NB - len(cur_nei)
cur_o_nei.extend(cur_nei)
cur_o_nei.extend([padding] * pad_len)
cur_x = create_var(torch.LongTensor(cur_x))
cur_x = self.embedding(cur_x)
cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
cur_o = cur_o_nei.sum(dim=1)
stop_hidden = torch.cat([cur_x, cur_o], dim=1)
stop_hiddens.append(stop_hidden)
stop_contexts.append(create_var(torch.LongTensor(range(batch_size))))
stop_targets.extend([0] * len(mol_batch))
pred_contexts = torch.cat(pred_contexts, dim=0)
pred_hiddens = torch.cat(pred_hiddens, dim=0)
<DeepExtract>
if 'word' == 'word':
(V, V_o) = (self.W, self.W_o)
elif 'word' == 'stop':
(V, V_o) = (self.U, self.U_o)
else:
raise ValueError('aggregate mode is wrong')
tree_contexts = x_tree_vecs.index_select(0, pred_contexts)
input_vec = torch.cat([pred_hiddens, tree_contexts], dim=-1)
output_vec = F.relu(V(input_vec))
pred_scores = V_o(output_vec)
</DeepExtract>
pred_targets = create_var(torch.LongTensor(pred_targets))
pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
(_, preds) = torch.max(pred_scores, dim=1)
pred_acc = torch.eq(preds, pred_targets).float()
pred_acc = torch.sum(pred_acc) / pred_targets.nelement()
stop_contexts = torch.cat(stop_contexts, dim=0)
stop_hiddens = torch.cat(stop_hiddens, dim=0)
stop_hiddens = F.relu(self.U_i(stop_hiddens))
<DeepExtract>
if 'stop' == 'word':
(V, V_o) = (self.W, self.W_o)
elif 'stop' == 'stop':
(V, V_o) = (self.U, self.U_o)
else:
raise ValueError('aggregate mode is wrong')
tree_contexts = x_tree_vecs.index_select(0, stop_contexts)
input_vec = torch.cat([stop_hiddens, tree_contexts], dim=-1)
output_vec = F.relu(V(input_vec))
stop_scores = V_o(output_vec)
</DeepExtract>
stop_scores = stop_scores.squeeze(-1)
stop_targets = create_var(torch.Tensor(stop_targets))
stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
stops = torch.ge(stop_scores, 0).float()
stop_acc = torch.eq(stops, stop_targets).float()
stop_acc = torch.sum(stop_acc) / stop_targets.nelement()
return (pred_loss, stop_loss, pred_acc.item(), stop_acc.item())
|
def forward(self, mol_batch, x_tree_vecs):
(pred_hiddens, pred_contexts, pred_targets) = ([], [], [])
(stop_hiddens, stop_contexts, stop_targets) = ([], [], [])
traces = []
for mol_tree in mol_batch:
s = []
for y in mol_tree.nodes[0].neighbors:
if y.idx == -1:
continue
s.append((mol_tree.nodes[0], y, 1))
dfs(s, y, mol_tree.nodes[0].idx)
s.append((y, mol_tree.nodes[0], 0))
traces.append(s)
for node in mol_tree.nodes:
node.neighbors = []
batch_size = len(mol_batch)
pred_hiddens.append(create_var(torch.zeros(len(mol_batch), self.hidden_size)))
pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
pred_contexts.append(create_var(torch.LongTensor(range(batch_size))))
max_iter = max([len(tr) for tr in traces])
padding = create_var(torch.zeros(self.hidden_size), False)
h = {}
for t in range(max_iter):
prop_list = []
batch_list = []
for (i, plist) in enumerate(traces):
if t < len(plist):
prop_list.append(plist[t])
batch_list.append(i)
cur_x = []
(cur_h_nei, cur_o_nei) = ([], [])
for (node_x, real_y, _) in prop_list:
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors if node_y.idx != real_y.idx]
pad_len = MAX_NB - len(cur_nei)
cur_h_nei.extend(cur_nei)
cur_h_nei.extend([padding] * pad_len)
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors]
pad_len = MAX_NB - len(cur_nei)
cur_o_nei.extend(cur_nei)
cur_o_nei.extend([padding] * pad_len)
cur_x.append(node_x.wid)
cur_x = create_var(torch.LongTensor(cur_x))
cur_x = self.embedding(cur_x)
cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
cur_o = cur_o_nei.sum(dim=1)
(pred_target, pred_list) = ([], [])
stop_target = []
for (i, m) in enumerate(prop_list):
(node_x, node_y, direction) = m
(x, y) = (node_x.idx, node_y.idx)
h[x, y] = new_h[i]
node_y.neighbors.append(node_x)
if direction == 1:
pred_target.append(node_y.wid)
pred_list.append(i)
stop_target.append(direction)
cur_batch = create_var(torch.LongTensor(batch_list))
stop_hidden = torch.cat([cur_x, cur_o], dim=1)
stop_hiddens.append(stop_hidden)
stop_contexts.append(cur_batch)
stop_targets.extend(stop_target)
if len(pred_list) > 0:
batch_list = [batch_list[i] for i in pred_list]
cur_batch = create_var(torch.LongTensor(batch_list))
pred_contexts.append(cur_batch)
cur_pred = create_var(torch.LongTensor(pred_list))
pred_hiddens.append(new_h.index_select(0, cur_pred))
pred_targets.extend(pred_target)
(cur_x, cur_o_nei) = ([], [])
for mol_tree in mol_batch:
node_x = mol_tree.nodes[0]
cur_x.append(node_x.wid)
cur_nei = [h[node_y.idx, node_x.idx] for node_y in node_x.neighbors]
pad_len = MAX_NB - len(cur_nei)
cur_o_nei.extend(cur_nei)
cur_o_nei.extend([padding] * pad_len)
cur_x = create_var(torch.LongTensor(cur_x))
cur_x = self.embedding(cur_x)
cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1, MAX_NB, self.hidden_size)
cur_o = cur_o_nei.sum(dim=1)
stop_hidden = torch.cat([cur_x, cur_o], dim=1)
stop_hiddens.append(stop_hidden)
stop_contexts.append(create_var(torch.LongTensor(range(batch_size))))
stop_targets.extend([0] * len(mol_batch))
pred_contexts = torch.cat(pred_contexts, dim=0)
pred_hiddens = torch.cat(pred_hiddens, dim=0)
if 'word' == 'word':
(V, V_o) = (self.W, self.W_o)
elif 'word' == 'stop':
(V, V_o) = (self.U, self.U_o)
else:
raise ValueError('aggregate mode is wrong')
tree_contexts = x_tree_vecs.index_select(0, pred_contexts)
input_vec = torch.cat([pred_hiddens, tree_contexts], dim=-1)
output_vec = F.relu(V(input_vec))
pred_scores = V_o(output_vec)
pred_targets = create_var(torch.LongTensor(pred_targets))
pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
(_, preds) = torch.max(pred_scores, dim=1)
pred_acc = torch.eq(preds, pred_targets).float()
pred_acc = torch.sum(pred_acc) / pred_targets.nelement()
stop_contexts = torch.cat(stop_contexts, dim=0)
stop_hiddens = torch.cat(stop_hiddens, dim=0)
stop_hiddens = F.relu(self.U_i(stop_hiddens))
if 'stop' == 'word':
(V, V_o) = (self.W, self.W_o)
elif 'stop' == 'stop':
(V, V_o) = (self.U, self.U_o)
else:
raise ValueError('aggregate mode is wrong')
tree_contexts = x_tree_vecs.index_select(0, stop_contexts)
input_vec = torch.cat([stop_hiddens, tree_contexts], dim=-1)
output_vec = F.relu(V(input_vec))
stop_scores = V_o(output_vec)
stop_scores = stop_scores.squeeze(-1)
stop_targets = create_var(torch.Tensor(stop_targets))
stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
stops = torch.ge(stop_scores, 0).float()
stop_acc = torch.eq(stops, stop_targets).float()
stop_acc = torch.sum(stop_acc) / stop_targets.nelement()
return (pred_loss, stop_loss, pred_acc.item(), stop_acc.item())
|
DIG
|
positive
|
def getDockerCommand(image, cmd):
"""
Takes in a command (as a list of arguments like ['halStats',
'file']) and outputs another list of arguments that will run it in
the given Docker container, binding directories when necessary.
image: the Docker image to use, e.g. 'quay.io/comparative-genomics-toolkit/cactus:latest'
cmd: list of arguments
"""
dockerPreamble = ['docker', 'run', '-i', '--rm', '-u', '%s:%s' % (os.getuid(), os.getgid())]
if 'TMPDIR' in os.environ:
tmpdir = os.environ['TMPDIR']
dockerPreamble.extend(['--env', 'TMPDIR={}'.format(tmpdir)])
dockerPreamble.extend(['-v', tmpdir + ':' + tmpdir])
work_dirs = []
for (i, arg) in enumerate(cmd):
if arg.startswith('-') and '=' in arg:
arg = arg.split('=')[1]
dirname = os.path.dirname(arg)
if os.path.exists(dirname):
arg = os.path.abspath(arg)
if arg.startswith('/dev'):
continue
<DeepExtract>
if not work_dirs:
work_dirs.append(dirname)
else:
for (i, work_dir) in enumerate(work_dirs):
mrca = mrca_path(dirname, work_dir)
if mrca == '/':
if i == len(work_dirs) - 1:
work_dirs.append(dirname)
else:
work_dirs[i] = mrca
</DeepExtract>
for work_dir in work_dirs:
work_dir = os.path.abspath(work_dir)
dockerPreamble += ['-v', work_dir + ':' + work_dir]
return dockerPreamble + [image] + cmd
|
def getDockerCommand(image, cmd):
"""
Takes in a command (as a list of arguments like ['halStats',
'file']) and outputs another list of arguments that will run it in
the given Docker container, binding directories when necessary.
image: the Docker image to use, e.g. 'quay.io/comparative-genomics-toolkit/cactus:latest'
cmd: list of arguments
"""
dockerPreamble = ['docker', 'run', '-i', '--rm', '-u', '%s:%s' % (os.getuid(), os.getgid())]
if 'TMPDIR' in os.environ:
tmpdir = os.environ['TMPDIR']
dockerPreamble.extend(['--env', 'TMPDIR={}'.format(tmpdir)])
dockerPreamble.extend(['-v', tmpdir + ':' + tmpdir])
work_dirs = []
for (i, arg) in enumerate(cmd):
if arg.startswith('-') and '=' in arg:
arg = arg.split('=')[1]
dirname = os.path.dirname(arg)
if os.path.exists(dirname):
arg = os.path.abspath(arg)
if arg.startswith('/dev'):
continue
if not work_dirs:
work_dirs.append(dirname)
else:
for (i, work_dir) in enumerate(work_dirs):
mrca = mrca_path(dirname, work_dir)
if mrca == '/':
if i == len(work_dirs) - 1:
work_dirs.append(dirname)
else:
work_dirs[i] = mrca
for work_dir in work_dirs:
work_dir = os.path.abspath(work_dir)
dockerPreamble += ['-v', work_dir + ':' + work_dir]
return dockerPreamble + [image] + cmd
|
Comparative-Annotation-Toolkit
|
positive
|
def __init__(self, host, port, enable_ssl=False):
"""Initialize this environment from the URL specified
@param host: The host to connect to
@type host: str
@param port: The port on the host to connect to
@type port: int
@param enable_ssl: True if we should use HTTPS, otherwise False
@type enable_ssl: bool
"""
if enable_ssl:
from httplib import HTTPSConnection as Connection
else:
from httplib import HTTPConnection as Connection
self.conn = Connection(host, port)
self.host = host
self.port = port
self.enable_ssl = enable_ssl
self.auth_header = None
import commands
import re
output = commands.getstatusoutput('security find-internet-password -gs %s' % self.host)
if output[0] == 0:
for l in output[1].split('\n'):
matches = re.match('password: "(.+?)"', str(l))
if matches:
password = matches.group(1)
matches = re.match('\\s+?"acct"<blob>="(.+?)"', str(l))
if matches:
username = matches.group(1)
<DeepExtract>
import base64
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
self.auth_header = 'Basic %s' % base64string
</DeepExtract>
<DeepExtract>
self.routes = {}
resp = self.request('GET', '/')
assert resp.status == 200
tree = etree.parse(resp)
root = tree.getroot()
assert root.tag == 'Index'
self.app_name = root.get('name')
for node in root:
if node.tag == 'api':
self.routes[node.get('name')] = node.xpath('href/text()')[0]
</DeepExtract>
|
def __init__(self, host, port, enable_ssl=False):
"""Initialize this environment from the URL specified
@param host: The host to connect to
@type host: str
@param port: The port on the host to connect to
@type port: int
@param enable_ssl: True if we should use HTTPS, otherwise False
@type enable_ssl: bool
"""
if enable_ssl:
from httplib import HTTPSConnection as Connection
else:
from httplib import HTTPConnection as Connection
self.conn = Connection(host, port)
self.host = host
self.port = port
self.enable_ssl = enable_ssl
self.auth_header = None
import commands
import re
output = commands.getstatusoutput('security find-internet-password -gs %s' % self.host)
if output[0] == 0:
for l in output[1].split('\n'):
matches = re.match('password: "(.+?)"', str(l))
if matches:
password = matches.group(1)
matches = re.match('\\s+?"acct"<blob>="(.+?)"', str(l))
if matches:
username = matches.group(1)
import base64
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
self.auth_header = 'Basic %s' % base64string
self.routes = {}
resp = self.request('GET', '/')
assert resp.status == 200
tree = etree.parse(resp)
root = tree.getroot()
assert root.tag == 'Index'
self.app_name = root.get('name')
for node in root:
if node.tag == 'api':
self.routes[node.get('name')] = node.xpath('href/text()')[0]
|
botoweb
|
positive
|
def notify(msg):
"""Show a popup-notification"""
import importlib
if importlib.util.find_spec('plyer'):
<DeepExtract>
from bleachbit import bleachbit_exe_path
__icon_fns = (os.path.normpath(os.path.join(bleachbit_exe_path, 'share\\bleachbit.ico')), os.path.normpath(os.path.join(bleachbit_exe_path, 'windows\\bleachbit.ico')))
icon_fn = None
for __icon_fn in __icon_fns:
if os.path.exists(__icon_fn):
icon_fn = __icon_fn
break
from plyer import notification
notification.notify(title=APP_NAME, message=msg, app_name=APP_NAME, app_icon=icon_fn)
</DeepExtract>
return
<DeepExtract>
gi.require_version('Notify', '0.7')
from gi.repository import Notify
if Notify.init(APP_NAME):
notify = Notify.Notification.new('BleachBit', msg, 'bleachbit')
notify.set_hint('desktop-entry', GLib.Variant('s', 'bleachbit'))
notify.show()
notify.set_timeout(10000)
</DeepExtract>
|
def notify(msg):
"""Show a popup-notification"""
import importlib
if importlib.util.find_spec('plyer'):
from bleachbit import bleachbit_exe_path
__icon_fns = (os.path.normpath(os.path.join(bleachbit_exe_path, 'share\\bleachbit.ico')), os.path.normpath(os.path.join(bleachbit_exe_path, 'windows\\bleachbit.ico')))
icon_fn = None
for __icon_fn in __icon_fns:
if os.path.exists(__icon_fn):
icon_fn = __icon_fn
break
from plyer import notification
notification.notify(title=APP_NAME, message=msg, app_name=APP_NAME, app_icon=icon_fn)
return
gi.require_version('Notify', '0.7')
from gi.repository import Notify
if Notify.init(APP_NAME):
notify = Notify.Notification.new('BleachBit', msg, 'bleachbit')
notify.set_hint('desktop-entry', GLib.Variant('s', 'bleachbit'))
notify.show()
notify.set_timeout(10000)
|
bleachbit
|
positive
|
def overall_scores(self, logger=None):
<DeepExtract>
confs = self._confs
pixels = self._pixels
num_classes = pixels.shape[0]
x_num = pixels.shape[1]
class_pixels = pixels.sum(1)
class_pixels += class_pixels == 0
scores = confs[xrange(num_classes), xrange(num_classes), :].sum(1)
acc = scores.sum() / pixels.sum()
cls_accs = scores / class_pixels
class_preds = confs.sum(0).sum(1)
ious = scores / (class_pixels + class_preds - scores)
logger = self._logger if logger is None else logger
if logger is not None:
if None is not None:
speed = 1.0 * self._computed.sum() / (time.time() - self._start)
logger.info('Done {}/{} with speed: {:.2f}/s'.format(None + 1, x_num, speed))
name = '' if self._label is None else '{}, '.format(self._label)
logger.info('{}pixel acc: {:.2f}%, mean acc: {:.2f}%, mean iou: {:.2f}%'.format(name, acc * 100, cls_accs.mean() * 100, ious.mean() * 100))
with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
logger.info('\n{}'.format(cls_accs * 100))
logger.info('\n{}'.format(ious * 100))
(acc, cls_accs, ious) = (acc, cls_accs, ious)
</DeepExtract>
return (acc, cls_accs.mean(), ious.mean())
|
def overall_scores(self, logger=None):
confs = self._confs
pixels = self._pixels
num_classes = pixels.shape[0]
x_num = pixels.shape[1]
class_pixels = pixels.sum(1)
class_pixels += class_pixels == 0
scores = confs[xrange(num_classes), xrange(num_classes), :].sum(1)
acc = scores.sum() / pixels.sum()
cls_accs = scores / class_pixels
class_preds = confs.sum(0).sum(1)
ious = scores / (class_pixels + class_preds - scores)
logger = self._logger if logger is None else logger
if logger is not None:
if None is not None:
speed = 1.0 * self._computed.sum() / (time.time() - self._start)
logger.info('Done {}/{} with speed: {:.2f}/s'.format(None + 1, x_num, speed))
name = '' if self._label is None else '{}, '.format(self._label)
logger.info('{}pixel acc: {:.2f}%, mean acc: {:.2f}%, mean iou: {:.2f}%'.format(name, acc * 100, cls_accs.mean() * 100, ious.mean() * 100))
with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
logger.info('\n{}'.format(cls_accs * 100))
logger.info('\n{}'.format(ious * 100))
(acc, cls_accs, ious) = (acc, cls_accs, ious)
return (acc, cls_accs.mean(), ious.mean())
|
CBST
|
positive
|
def factory_unit_type(self):
"""The unit type currently being produced by the factory.
* InappropriateUnitType - the unit is not a factory.
* NullValue - the factory is not producing.
:type self: Unit
:rtype: UnitType
"""
result = _lib.bc_Unit_factory_unit_type(self._ptr)
<DeepExtract>
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
</DeepExtract>
result = UnitType(result)
return result
|
def factory_unit_type(self):
"""The unit type currently being produced by the factory.
* InappropriateUnitType - the unit is not a factory.
* NullValue - the factory is not producing.
:type self: Unit
:rtype: UnitType
"""
result = _lib.bc_Unit_factory_unit_type(self._ptr)
if _lib.bc_has_err():
_lasterror = _ffi.new('char**')
err = _lib.bc_get_last_err(_lasterror)
errtext = _ffi.string(_lasterror[0])
_lib.bc_free_string(_lasterror[0])
raise Exception(errtext)
result = UnitType(result)
return result
|
bc18-scaffold
|
positive
|
def get_msg_type(self):
"""
get msg type
"""
if self._type is None:
<DeepExtract>
num = 0
b_ind = 0
for i in str_data:
num += pow(256, b_ind) * ord(i)
b_ind += 1
self._type = num
</DeepExtract>
return self._type
|
def get_msg_type(self):
"""
get msg type
"""
if self._type is None:
num = 0
b_ind = 0
for i in str_data:
num += pow(256, b_ind) * ord(i)
b_ind += 1
self._type = num
return self._type
|
CUP
|
positive
|
@pytest.mark.parametrize(['instring', 'expected_true'], [('run.foo=True and model.a_b=0', True), ('run.foo=False And model.a_b=0', False), ('run.foo=True AND model.a_b=1', False), ('run.foo=False and model.a_b=1', False), ('run.foo=1 and model.a_b=0', True)])
def test_imasking_and(self, parse_where_string, instring, expected_true):
<DeepExtract>
def _parse_where_string(imasking_string):
parsed_ = imasking.parse_string(imasking_string, parse_all=True)
evaluated_ = parsed_[0].eval(**instring)
evaluated_ = _parse_where_string
</DeepExtract>
assert evaluated_ if expected_true else not evaluated_
|
@pytest.mark.parametrize(['instring', 'expected_true'], [('run.foo=True and model.a_b=0', True), ('run.foo=False And model.a_b=0', False), ('run.foo=True AND model.a_b=1', False), ('run.foo=False and model.a_b=1', False), ('run.foo=1 and model.a_b=0', True)])
def test_imasking_and(self, parse_where_string, instring, expected_true):
def _parse_where_string(imasking_string):
parsed_ = imasking.parse_string(imasking_string, parse_all=True)
evaluated_ = parsed_[0].eval(**instring)
evaluated_ = _parse_where_string
assert evaluated_ if expected_true else not evaluated_
|
calliope
|
positive
|
def evaluate(queries: List[EvaluationQuery], output_file='eval_out.log'):
"""Evaluates the queries.
Returns a tuple of EvaluationResult and the list of queries. Each
query is enhanced with information on false_positives
and false_negatives.
:type queries: list[EvaluationQuery]
:rtype: (EvaluationResult, list[EvaluationQuery])
:param queries:
:return:
"""
EvaluationResult = namedtuple('EvaluationResult', ['avg_precision', 'avg_recall', 'avg_f1', 'parse_acc', 'macro_f1', 'macro_f1_xao', 'avg_precision_xao', 'precision_kw', 'recall_kw', 'f1_kw', 'num_questions', 'num_questions_no_answer', 'accuracy', 'oracle_accuracy', 'oracle_avg_f1', 'oracle_parse_acc', 'oracle_top_2', 'oracle_top_3', 'oracle_top_5', 'oracle_top_10', 'oracle_top_100', 'avg_oracle_position', 'avg_num_candidates'])
num_q_no_answer = 0
num_candidates = 0
for query in queries:
query.reset_results()
gold_targets_list = query.targets_mids_or_names
candidates = query.eval_candidates
if not gold_targets_list:
num_q_no_answer += 1
if not gold_targets_list and (not candidates):
query.precision = 1.0
query.recall = 1.0
query.f1 = 1.0
query.parse_score = 1.0
query.parse_match = True
query.oracle_f1 = 1.0
query.oracle_parse_score = 1.0
query.oracle_parse_match = True
query.false_negatives = []
query.false_positives = []
if gold_targets_list and (not candidates):
query.precision = 0.0
query.recall = 0.0
query.f1 = 0.0
query.parse_score = 0.0
query.parse_match = False
query.false_negatives = gold_targets_list
query.false_positives = []
else:
num_candidates += len(candidates)
for (i, prediction) in enumerate(candidates):
best_candidate_eval = prediction.evaluation_result
if not best_candidate_eval:
<DeepExtract>
candidate_results = []
parse_matches = []
best_parse_match = 0.0
if query.target_parses:
for parse in query.target_parses:
parse_matches.append(compute_parse_match(prediction, parse))
best_parse_match = max(parse_matches)
gold_targets_list = query.targets_mids_or_names
gold_targets_sets = [parse_to_set(targets) for targets in gold_targets_list]
prediction_set = parse_to_set(prediction.prediction_mids_or_names)
logger.debug('prediction_set: %r', prediction_set)
for (results_num, gold_targets_set) in enumerate(gold_targets_sets):
logger.debug('gold_targets_set: %r', gold_targets_set)
true_positives = 0.0
false_positives = []
false_negatives = []
if len(prediction.prediction_names) == len(gold_targets_list[results_num]) and len(gold_targets_set) != len(prediction_set):
logger.debug('Result set has different size than result list.')
num_gold = len(gold_targets_set)
num_predicted = len(prediction_set)
for res in prediction_set:
if res in gold_targets_set:
true_positives += 1.0
gold_targets_set.remove(res)
else:
false_positives.append(res)
false_negatives.extend(gold_targets_set)
if num_gold == 0:
if num_predicted == 0:
precision = 1.0
recall = 1.0
f1 = 1.0
else:
precision = 0.0
recall = 0.0
f1 = 0.0
else:
if num_predicted == 0:
precision = 0.0
else:
precision = true_positives / float(num_predicted)
recall = true_positives / float(num_gold)
f1 = 0.0
if precision + recall > 0:
f1 = 2.0 * precision * recall / (precision + recall)
if f1 > 0.99:
logger.debug('Perfect match: %s = %s.', prediction.prediction_names, gold_targets_list[results_num])
candidate_results.append(CandidateEvaluationResult(precision, recall, f1, best_parse_match, false_positives, false_negatives))
candidate_evals = candidate_results
</DeepExtract>
best_candidate_eval = max(candidate_evals, key=lambda ev: ev.f1)
best_candidate_eval_parse = max(candidate_evals, key=lambda ev: ev.parse_score)
prediction.evaluation_result = best_candidate_eval
if i == 0:
query.precision = best_candidate_eval.precision
query.recall = best_candidate_eval.recall
query.f1 = best_candidate_eval.f1
query.parse_score = best_candidate_eval.parse_score
query.parse_match = query.parse_score > 0.99
query.false_negatives = best_candidate_eval.false_negatives
query.false_positives = best_candidate_eval.false_positives
if query.oracle_f1 < best_candidate_eval.f1:
query.oracle_f1 = best_candidate_eval.f1
query.oracle_position = i + 1
if query.oracle_parse_score < best_candidate_eval_parse.parse_score:
query.oracle_parse_score = best_candidate_eval.parse_score
query.oracle_parse_match = best_candidate_eval.parse_score > 0.99
query.oracle_parse_position = i + 1
num_queries = len(queries)
num_unanswered_queries = float(len([q for q in queries if not q.eval_candidates]))
num_answered_queries = float(len([q for q in queries if q.eval_candidates]))
completely_correct = float(len([q for q in queries if q.f1 > 0.99]))
oracle_positions = [q.oracle_position for q in queries if q.oracle_position > 0]
avg_oracle_position = sum(oracle_positions) / float(len(oracle_positions))
oracle_top_2 = len([p for p in oracle_positions if p <= 2])
oracle_top_3 = len([p for p in oracle_positions if p <= 3])
oracle_top_5 = len([p for p in oracle_positions if p <= 5])
oracle_top_10 = len([p for p in oracle_positions if p <= 10])
oracle_top_100 = len([p for p in oracle_positions if p <= 100])
perfect_with_oracle = len([q for q in queries if q.oracle_f1 > 0.99])
oracle_accuracy = perfect_with_oracle / num_queries
oracle_top_2f = oracle_top_2 / num_queries
oracle_top_3f = oracle_top_3 / num_queries
oracle_top_5f = oracle_top_5 / num_queries
oracle_top_10f = oracle_top_10 / num_queries
oracle_top_100f = oracle_top_100 / num_queries
average_f1 = sum([q.f1 for q in queries]) / num_queries
oracle_average_f1 = sum([q.oracle_f1 for q in queries]) / num_queries
average_recall = sum([q.recall for q in queries]) / num_queries
average_precision = sum([q.precision for q in queries]) / num_queries
parse_accuracy = len([1 for q in queries if q.parse_match]) / num_queries
oracle_parse_accuracy = len([1 for q in queries if q.oracle_parse_match]) / num_queries
macro_f1 = 0.0
if average_precision + average_recall > 0:
macro_f1 = 2 * average_precision * average_recall / (average_precision + average_recall)
accuracy = float(completely_correct) / num_queries
average_precision_xao = sum([q.precision for q in queries]) / (num_queries - num_unanswered_queries)
macro_f1_xao = 0
if average_precision_xao + average_recall > 0:
macro_f1_xao = 2 * average_precision_xao * average_recall / (average_precision_xao + average_recall)
precision_kw = completely_correct / num_answered_queries
recall_kw = completely_correct / num_queries
f1_kw = 0.0
if precision_kw + recall_kw > 0:
f1_kw = 2 * precision_kw * recall_kw / (precision_kw + recall_kw)
avg_num_candidates = float(num_candidates) / num_queries
overall_result = EvaluationResult(average_precision, average_recall, average_f1, parse_accuracy, macro_f1, macro_f1_xao, average_precision_xao, precision_kw, recall_kw, f1_kw, num_queries, num_q_no_answer, accuracy, oracle_accuracy, oracle_average_f1, oracle_parse_accuracy, oracle_top_2f, oracle_top_3f, oracle_top_5f, oracle_top_10f, oracle_top_100f, avg_oracle_position, avg_num_candidates)
if output_file:
<DeepExtract>
logger.info('Writing results to %s.' % output_file)
with open(output_file, 'w', encoding='utf-8') as f:
for q in queries:
q_text = q.utterance
if q.targets_names:
result_text = json.dumps(q.targets_names[0])
else:
result_text = json.dumps(q.targets_mids[0])
actual_result = []
if q.eval_candidates:
actual_result = q.eval_candidates[0].prediction_names
actual_result_text = json.dumps(actual_result)
f.write('%s\t%s\t%s\n' % (q_text, result_text, actual_result_text))
</DeepExtract>
return (overall_result, queries)
|
def evaluate(queries: List[EvaluationQuery], output_file='eval_out.log'):
"""Evaluates the queries.
Returns a tuple of EvaluationResult and the list of queries. Each
query is enhanced with information on false_positives
and false_negatives.
:type queries: list[EvaluationQuery]
:rtype: (EvaluationResult, list[EvaluationQuery])
:param queries:
:return:
"""
EvaluationResult = namedtuple('EvaluationResult', ['avg_precision', 'avg_recall', 'avg_f1', 'parse_acc', 'macro_f1', 'macro_f1_xao', 'avg_precision_xao', 'precision_kw', 'recall_kw', 'f1_kw', 'num_questions', 'num_questions_no_answer', 'accuracy', 'oracle_accuracy', 'oracle_avg_f1', 'oracle_parse_acc', 'oracle_top_2', 'oracle_top_3', 'oracle_top_5', 'oracle_top_10', 'oracle_top_100', 'avg_oracle_position', 'avg_num_candidates'])
num_q_no_answer = 0
num_candidates = 0
for query in queries:
query.reset_results()
gold_targets_list = query.targets_mids_or_names
candidates = query.eval_candidates
if not gold_targets_list:
num_q_no_answer += 1
if not gold_targets_list and (not candidates):
query.precision = 1.0
query.recall = 1.0
query.f1 = 1.0
query.parse_score = 1.0
query.parse_match = True
query.oracle_f1 = 1.0
query.oracle_parse_score = 1.0
query.oracle_parse_match = True
query.false_negatives = []
query.false_positives = []
if gold_targets_list and (not candidates):
query.precision = 0.0
query.recall = 0.0
query.f1 = 0.0
query.parse_score = 0.0
query.parse_match = False
query.false_negatives = gold_targets_list
query.false_positives = []
else:
num_candidates += len(candidates)
for (i, prediction) in enumerate(candidates):
best_candidate_eval = prediction.evaluation_result
if not best_candidate_eval:
candidate_results = []
parse_matches = []
best_parse_match = 0.0
if query.target_parses:
for parse in query.target_parses:
parse_matches.append(compute_parse_match(prediction, parse))
best_parse_match = max(parse_matches)
gold_targets_list = query.targets_mids_or_names
gold_targets_sets = [parse_to_set(targets) for targets in gold_targets_list]
prediction_set = parse_to_set(prediction.prediction_mids_or_names)
logger.debug('prediction_set: %r', prediction_set)
for (results_num, gold_targets_set) in enumerate(gold_targets_sets):
logger.debug('gold_targets_set: %r', gold_targets_set)
true_positives = 0.0
false_positives = []
false_negatives = []
if len(prediction.prediction_names) == len(gold_targets_list[results_num]) and len(gold_targets_set) != len(prediction_set):
logger.debug('Result set has different size than result list.')
num_gold = len(gold_targets_set)
num_predicted = len(prediction_set)
for res in prediction_set:
if res in gold_targets_set:
true_positives += 1.0
gold_targets_set.remove(res)
else:
false_positives.append(res)
false_negatives.extend(gold_targets_set)
if num_gold == 0:
if num_predicted == 0:
precision = 1.0
recall = 1.0
f1 = 1.0
else:
precision = 0.0
recall = 0.0
f1 = 0.0
else:
if num_predicted == 0:
precision = 0.0
else:
precision = true_positives / float(num_predicted)
recall = true_positives / float(num_gold)
f1 = 0.0
if precision + recall > 0:
f1 = 2.0 * precision * recall / (precision + recall)
if f1 > 0.99:
logger.debug('Perfect match: %s = %s.', prediction.prediction_names, gold_targets_list[results_num])
candidate_results.append(CandidateEvaluationResult(precision, recall, f1, best_parse_match, false_positives, false_negatives))
candidate_evals = candidate_results
best_candidate_eval = max(candidate_evals, key=lambda ev: ev.f1)
best_candidate_eval_parse = max(candidate_evals, key=lambda ev: ev.parse_score)
prediction.evaluation_result = best_candidate_eval
if i == 0:
query.precision = best_candidate_eval.precision
query.recall = best_candidate_eval.recall
query.f1 = best_candidate_eval.f1
query.parse_score = best_candidate_eval.parse_score
query.parse_match = query.parse_score > 0.99
query.false_negatives = best_candidate_eval.false_negatives
query.false_positives = best_candidate_eval.false_positives
if query.oracle_f1 < best_candidate_eval.f1:
query.oracle_f1 = best_candidate_eval.f1
query.oracle_position = i + 1
if query.oracle_parse_score < best_candidate_eval_parse.parse_score:
query.oracle_parse_score = best_candidate_eval.parse_score
query.oracle_parse_match = best_candidate_eval.parse_score > 0.99
query.oracle_parse_position = i + 1
num_queries = len(queries)
num_unanswered_queries = float(len([q for q in queries if not q.eval_candidates]))
num_answered_queries = float(len([q for q in queries if q.eval_candidates]))
completely_correct = float(len([q for q in queries if q.f1 > 0.99]))
oracle_positions = [q.oracle_position for q in queries if q.oracle_position > 0]
avg_oracle_position = sum(oracle_positions) / float(len(oracle_positions))
oracle_top_2 = len([p for p in oracle_positions if p <= 2])
oracle_top_3 = len([p for p in oracle_positions if p <= 3])
oracle_top_5 = len([p for p in oracle_positions if p <= 5])
oracle_top_10 = len([p for p in oracle_positions if p <= 10])
oracle_top_100 = len([p for p in oracle_positions if p <= 100])
perfect_with_oracle = len([q for q in queries if q.oracle_f1 > 0.99])
oracle_accuracy = perfect_with_oracle / num_queries
oracle_top_2f = oracle_top_2 / num_queries
oracle_top_3f = oracle_top_3 / num_queries
oracle_top_5f = oracle_top_5 / num_queries
oracle_top_10f = oracle_top_10 / num_queries
oracle_top_100f = oracle_top_100 / num_queries
average_f1 = sum([q.f1 for q in queries]) / num_queries
oracle_average_f1 = sum([q.oracle_f1 for q in queries]) / num_queries
average_recall = sum([q.recall for q in queries]) / num_queries
average_precision = sum([q.precision for q in queries]) / num_queries
parse_accuracy = len([1 for q in queries if q.parse_match]) / num_queries
oracle_parse_accuracy = len([1 for q in queries if q.oracle_parse_match]) / num_queries
macro_f1 = 0.0
if average_precision + average_recall > 0:
macro_f1 = 2 * average_precision * average_recall / (average_precision + average_recall)
accuracy = float(completely_correct) / num_queries
average_precision_xao = sum([q.precision for q in queries]) / (num_queries - num_unanswered_queries)
macro_f1_xao = 0
if average_precision_xao + average_recall > 0:
macro_f1_xao = 2 * average_precision_xao * average_recall / (average_precision_xao + average_recall)
precision_kw = completely_correct / num_answered_queries
recall_kw = completely_correct / num_queries
f1_kw = 0.0
if precision_kw + recall_kw > 0:
f1_kw = 2 * precision_kw * recall_kw / (precision_kw + recall_kw)
avg_num_candidates = float(num_candidates) / num_queries
overall_result = EvaluationResult(average_precision, average_recall, average_f1, parse_accuracy, macro_f1, macro_f1_xao, average_precision_xao, precision_kw, recall_kw, f1_kw, num_queries, num_q_no_answer, accuracy, oracle_accuracy, oracle_average_f1, oracle_parse_accuracy, oracle_top_2f, oracle_top_3f, oracle_top_5f, oracle_top_10f, oracle_top_100f, avg_oracle_position, avg_num_candidates)
if output_file:
logger.info('Writing results to %s.' % output_file)
with open(output_file, 'w', encoding='utf-8') as f:
for q in queries:
q_text = q.utterance
if q.targets_names:
result_text = json.dumps(q.targets_names[0])
else:
result_text = json.dumps(q.targets_mids[0])
actual_result = []
if q.eval_candidates:
actual_result = q.eval_candidates[0].prediction_names
actual_result_text = json.dumps(actual_result)
f.write('%s\t%s\t%s\n' % (q_text, result_text, actual_result_text))
return (overall_result, queries)
|
aqqu
|
positive
|
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None, head_mask=None):
(qlen, bsz) = (w.size(0), w.size(1))
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
rw_head_q = w_head_q + r_w_bias[None]
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))
D_ = r_bias[None, :, None]
<DeepExtract>
    x = B_ + D_
    zero_pad_shape = (x.size(0), 1) + x.size()[2:]
    zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=1)
    x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
    x_padded = x_padded.view(*x_padded_shape)
    x = x_padded[1:].view_as(x)
    zero_triu = False  # default of the inlined _rel_shift helper
    if zero_triu:
        ones = torch.ones((x.size(0), x.size(1)))
        x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
    BD = x
</DeepExtract>
attn_score = AC + BD
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
if head_mask is not None:
attn_prob = attn_prob * head_mask
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
outputs = [w + attn_out]
else:
outputs = [self.layer_norm(w + attn_out)]
if self.output_attentions:
outputs.append(attn_prob)
return outputs
|
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None, head_mask=None):
(qlen, bsz) = (w.size(0), w.size(1))
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
(w_head_q, w_head_k, w_head_v) = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
rw_head_q = w_head_q + r_w_bias[None]
AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))
B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))
D_ = r_bias[None, :, None]
    x = B_ + D_
    zero_pad_shape = (x.size(0), 1) + x.size()[2:]
    zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=1)
    x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
    x_padded = x_padded.view(*x_padded_shape)
    x = x_padded[1:].view_as(x)
    zero_triu = False  # default of the inlined _rel_shift helper
    if zero_triu:
        ones = torch.ones((x.size(0), x.size(1)))
        x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
    BD = x
attn_score = AC + BD
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
if head_mask is not None:
attn_prob = attn_prob * head_mask
attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
outputs = [w + attn_out]
else:
outputs = [self.layer_norm(w + attn_out)]
if self.output_attentions:
outputs.append(attn_prob)
return outputs
|
BERT-CRF
|
positive
|
def test(scale=2):
image_size = (2560, 2560)
output_size = (256, 1024)
input_size = (image_size[0] // scale, image_size[1] // scale)
m = Model(input_size, output_size, 1 / scale).cuda()
beziers = [[]]
im_arrs = []
down_scales = []
imgfile = '1019.jpg'
im = Image.open('tools/tests/imgs/' + imgfile)
(w, h) = im.size
<DeepExtract>
w_ratio = w / image_size[1]
h_ratio = h / image_size[0]
down_scale = max(w_ratio, h_ratio)
if down_scale > 1:
down_scale = down_scale
else:
down_scale = 1
</DeepExtract>
down_scales.append(down_scale)
if down_scale > 1:
im = im.resize((int(w / down_scale), int(h / down_scale)), Image.ANTIALIAS)
(w, h) = im.size
padding = (0, 0, image_size[1] - w, image_size[0] - h)
im = ImageOps.expand(im, padding)
im = im.resize((input_size[1], input_size[0]), Image.ANTIALIAS)
im_arrs.append(np.array(im))
cps = [152.0, 209.0, 134.1, 34.18, 365.69, 66.2, 377.0, 206.0, 345.0, 214.0, 334.31, 109.71, 190.03, 80.12, 203.0, 214.0]
cps = np.array(cps)[[1, 0, 3, 2, 5, 4, 7, 6, 15, 14, 13, 12, 11, 10, 9, 8]]
beziers[0].append(cps)
beziers = [torch.from_numpy(np.stack(b)).cuda().float() for b in beziers]
beziers = [b / d for (b, d) in zip(beziers, down_scales)]
im_arrs = np.stack(im_arrs)
x = torch.from_numpy(im_arrs).permute(0, 3, 1, 2).cuda().float()
x = m(x, beziers)
for (i, roi) in enumerate(x):
roi = roi.cpu().detach().numpy().transpose(1, 2, 0).astype(np.uint8)
im = Image.fromarray(roi, 'RGB')
im.save('roi_1103.png')
loss = x.mean()
loss.backward()
print(m)
|
def test(scale=2):
image_size = (2560, 2560)
output_size = (256, 1024)
input_size = (image_size[0] // scale, image_size[1] // scale)
m = Model(input_size, output_size, 1 / scale).cuda()
beziers = [[]]
im_arrs = []
down_scales = []
imgfile = '1019.jpg'
im = Image.open('tools/tests/imgs/' + imgfile)
(w, h) = im.size
w_ratio = w / image_size[1]
h_ratio = h / image_size[0]
down_scale = max(w_ratio, h_ratio)
if down_scale > 1:
down_scale = down_scale
else:
down_scale = 1
down_scales.append(down_scale)
if down_scale > 1:
im = im.resize((int(w / down_scale), int(h / down_scale)), Image.ANTIALIAS)
(w, h) = im.size
padding = (0, 0, image_size[1] - w, image_size[0] - h)
im = ImageOps.expand(im, padding)
im = im.resize((input_size[1], input_size[0]), Image.ANTIALIAS)
im_arrs.append(np.array(im))
cps = [152.0, 209.0, 134.1, 34.18, 365.69, 66.2, 377.0, 206.0, 345.0, 214.0, 334.31, 109.71, 190.03, 80.12, 203.0, 214.0]
cps = np.array(cps)[[1, 0, 3, 2, 5, 4, 7, 6, 15, 14, 13, 12, 11, 10, 9, 8]]
beziers[0].append(cps)
beziers = [torch.from_numpy(np.stack(b)).cuda().float() for b in beziers]
beziers = [b / d for (b, d) in zip(beziers, down_scales)]
im_arrs = np.stack(im_arrs)
x = torch.from_numpy(im_arrs).permute(0, 3, 1, 2).cuda().float()
x = m(x, beziers)
for (i, roi) in enumerate(x):
roi = roi.cpu().detach().numpy().transpose(1, 2, 0).astype(np.uint8)
im = Image.fromarray(roi, 'RGB')
im.save('roi_1103.png')
loss = x.mean()
loss.backward()
print(m)
|
bezier_curve_text_spotting
|
positive
|
def _key_callback(key_data_packed):
global _key_handlers
<DeepExtract>
key_data = EFI_KEY_DATA()
if key_data_packed & 1 << 16:
key_data.Key.ScanCode = key_data_packed & 65535
else:
key_data.Key.UnicodeChar = chr(key_data_packed & 65535)
key_data.KeyState.KeyShiftState = EFI_SHIFT_STATE_VALID | key_data_packed >> 17 & 1023
if key_data_packed & 1 << 28:
key_data.KeyState.KeyToggleState |= EFI_SCROLL_LOCK_ACTIVE
if key_data_packed & 1 << 29:
key_data.KeyState.KeyToggleState |= EFI_NUM_LOCK_ACTIVE
if key_data_packed & 1 << 30:
key_data.KeyState.KeyToggleState |= EFI_CAPS_LOCK_ACTIVE
if key_data_packed & 1 << 31:
key_data.KeyState.KeyToggleState |= EFI_KEY_STATE_EXPOSED
key_data.KeyState.KeyToggleState |= EFI_TOGGLE_STATE_VALID
key_data = key_data
</DeepExtract>
shift = 0
if key_data.KeyState.KeyShiftState & EFI_SHIFT_STATE_VALID:
shift = key_data.KeyState.KeyShiftState & ~EFI_SHIFT_STATE_VALID
_key_handlers[key_data.Key.ScanCode, key_data.Key.UnicodeChar, shift][1]()
|
def _key_callback(key_data_packed):
global _key_handlers
key_data = EFI_KEY_DATA()
if key_data_packed & 1 << 16:
key_data.Key.ScanCode = key_data_packed & 65535
else:
key_data.Key.UnicodeChar = chr(key_data_packed & 65535)
key_data.KeyState.KeyShiftState = EFI_SHIFT_STATE_VALID | key_data_packed >> 17 & 1023
if key_data_packed & 1 << 28:
key_data.KeyState.KeyToggleState |= EFI_SCROLL_LOCK_ACTIVE
if key_data_packed & 1 << 29:
key_data.KeyState.KeyToggleState |= EFI_NUM_LOCK_ACTIVE
if key_data_packed & 1 << 30:
key_data.KeyState.KeyToggleState |= EFI_CAPS_LOCK_ACTIVE
if key_data_packed & 1 << 31:
key_data.KeyState.KeyToggleState |= EFI_KEY_STATE_EXPOSED
key_data.KeyState.KeyToggleState |= EFI_TOGGLE_STATE_VALID
key_data = key_data
shift = 0
if key_data.KeyState.KeyShiftState & EFI_SHIFT_STATE_VALID:
shift = key_data.KeyState.KeyShiftState & ~EFI_SHIFT_STATE_VALID
_key_handlers[key_data.Key.ScanCode, key_data.Key.UnicodeChar, shift][1]()
|
bits
|
positive
|
def main():
<DeepExtract>
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument('--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
args = args
</DeepExtract>
devkit_path = args.devkit_path
aug_path = args.aug_path
nproc = args.nproc
if args.out_dir is None:
out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
else:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
in_dir = osp.join(aug_path, 'dataset', 'cls')
mmcv.track_parallel_progress(partial(convert_mat, in_dir=in_dir, out_dir=out_dir), list(mmcv.scandir(in_dir, suffix='.mat')), nproc=nproc)
full_aug_list = []
with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'train.txt')) as f:
ori_train_list = [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'val.txt')) as f:
val_list = [line.strip() for line in f]
<DeepExtract>
aug_train_list = list(set(ori_train_list + full_aug_list) - set(val_list))
</DeepExtract>
assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(AUG_LEN)
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'trainaug.txt'), 'w') as f:
f.writelines((line + '\n' for line in aug_train_list))
<DeepExtract>
aug_list = list(set(full_aug_list) - set(ori_train_list + val_list))
</DeepExtract>
assert len(aug_list) == AUG_LEN - len(ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN - len(ori_train_list))
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'), 'w') as f:
f.writelines((line + '\n' for line in aug_list))
print('Done!')
|
def main():
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument('--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
args = args
devkit_path = args.devkit_path
aug_path = args.aug_path
nproc = args.nproc
if args.out_dir is None:
out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
else:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
in_dir = osp.join(aug_path, 'dataset', 'cls')
mmcv.track_parallel_progress(partial(convert_mat, in_dir=in_dir, out_dir=out_dir), list(mmcv.scandir(in_dir, suffix='.mat')), nproc=nproc)
full_aug_list = []
with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:
full_aug_list += [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'train.txt')) as f:
ori_train_list = [line.strip() for line in f]
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'val.txt')) as f:
val_list = [line.strip() for line in f]
aug_train_list = list(set(ori_train_list + full_aug_list) - set(val_list))
assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(AUG_LEN)
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'trainaug.txt'), 'w') as f:
f.writelines((line + '\n' for line in aug_train_list))
aug_list = list(set(full_aug_list) - set(ori_train_list + val_list))
assert len(aug_list) == AUG_LEN - len(ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN - len(ori_train_list))
with open(osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'), 'w') as f:
f.writelines((line + '\n' for line in aug_list))
print('Done!')
|
Auto-Seg-Loss
|
positive
|
def __fisheye_camera_params(scene, engine, aspect_ratio, film_width, film_height):
camera = self.bl_camera
<DeepExtract>
camera = self.bl_camera
x_aspect_comp = 1 if aspect_ratio > 1 else 1 / aspect_ratio
y_aspect_comp = aspect_ratio if aspect_ratio > 1 else 1
cam_params = {'aspect_ratio': aspect_ratio, 'focal_length': camera.data.lens / 1000, 'film_dimensions': asr.Vector2f(film_width, film_height), 'near_z': camera.data.appleseed.near_z, 'shift_x': (engine.camera_shift_x(camera) + camera.data.shift_x) * x_aspect_comp * film_width, 'shift_y': camera.data.shift_y * y_aspect_comp * film_height, 'shutter_open_end_time': scene.appleseed.shutter_open_end_time, 'shutter_open_begin_time': scene.appleseed.shutter_open, 'shutter_close_begin_time': scene.appleseed.shutter_close_begin_time, 'shutter_close_end_time': scene.appleseed.shutter_close}
cam_params = cam_params
</DeepExtract>
cam_params.update({'projection_type': camera.data.appleseed.fisheye_projection_type})
return cam_params
|
def __fisheye_camera_params(scene, engine, aspect_ratio, film_width, film_height):
camera = self.bl_camera
camera = self.bl_camera
x_aspect_comp = 1 if aspect_ratio > 1 else 1 / aspect_ratio
y_aspect_comp = aspect_ratio if aspect_ratio > 1 else 1
cam_params = {'aspect_ratio': aspect_ratio, 'focal_length': camera.data.lens / 1000, 'film_dimensions': asr.Vector2f(film_width, film_height), 'near_z': camera.data.appleseed.near_z, 'shift_x': (engine.camera_shift_x(camera) + camera.data.shift_x) * x_aspect_comp * film_width, 'shift_y': camera.data.shift_y * y_aspect_comp * film_height, 'shutter_open_end_time': scene.appleseed.shutter_open_end_time, 'shutter_open_begin_time': scene.appleseed.shutter_open, 'shutter_close_begin_time': scene.appleseed.shutter_close_begin_time, 'shutter_close_end_time': scene.appleseed.shutter_close}
cam_params = cam_params
cam_params.update({'projection_type': camera.data.appleseed.fisheye_projection_type})
return cam_params
|
blenderseed
|
positive
|
def load_sysv_unit_conf(self, module):
""" read the unit file with a UnitConfParser (sysv) """
<DeepExtract>
self.scan_unit_sysv_files()
assert self._file_for_unit_sysv is not None
if module and module in self._file_for_unit_sysv:
path = self._file_for_unit_sysv[module]
if module and unit_of(module) in self._file_for_unit_sysv:
path = self._file_for_unit_sysv[unit_of(module)]
path = None
</DeepExtract>
if not path:
return None
assert self._loaded_file_sysv is not None
if path in self._loaded_file_sysv:
return self._loaded_file_sysv[path]
data = UnitConfParser()
data.read_sysv(path)
conf = SystemctlConf(data, module)
conf._root = self._root
self._loaded_file_sysv[path] = conf
return conf
|
def load_sysv_unit_conf(self, module):
""" read the unit file with a UnitConfParser (sysv) """
self.scan_unit_sysv_files()
assert self._file_for_unit_sysv is not None
if module and module in self._file_for_unit_sysv:
path = self._file_for_unit_sysv[module]
if module and unit_of(module) in self._file_for_unit_sysv:
path = self._file_for_unit_sysv[unit_of(module)]
path = None
if not path:
return None
assert self._loaded_file_sysv is not None
if path in self._loaded_file_sysv:
return self._loaded_file_sysv[path]
data = UnitConfParser()
data.read_sysv(path)
conf = SystemctlConf(data, module)
conf._root = self._root
self._loaded_file_sysv[path] = conf
return conf
|
docker-systemctl-images
|
positive
|
def prepare_or_wait_for_session(self, master='', config=None, wait_for_checkpoint=False, max_wait_secs=7200, start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_service` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(master, init_op=self.init_op, saver=self.saver, checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config, init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
<DeepExtract>
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True), self._logdir, 'graph.pbtxt')
if self._summary_writer and (not self._graph_added_to_summary):
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
</DeepExtract>
if start_standard_services:
logging.info('Starting standard services.')
<DeepExtract>
if not self._is_chief:
raise RuntimeError('Only chief supervisor can start standard services. Because only chief supervisors can write events.')
if not self._logdir:
logging.warning("Standard services need a 'logdir' passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
</DeepExtract>
else:
sess = self._session_manager.wait_for_session(master, config=config, max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info('Starting queue runners.')
<DeepExtract>
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True, start=True))
return threads
</DeepExtract>
return sess
|
def prepare_or_wait_for_session(self, master='', config=None, wait_for_checkpoint=False, max_wait_secs=7200, start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_service` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(master, init_op=self.init_op, saver=self.saver, checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs, config=config, init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True), self._logdir, 'graph.pbtxt')
if self._summary_writer and (not self._graph_added_to_summary):
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
if start_standard_services:
logging.info('Starting standard services.')
if not self._is_chief:
raise RuntimeError('Only chief supervisor can start standard services. Because only chief supervisors can write events.')
if not self._logdir:
logging.warning("Standard services need a 'logdir' passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(SessionLog(status=SessionLog.START), current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
else:
sess = self._session_manager.wait_for_session(master, config=config, max_wait_secs=max_wait_secs)
if start_standard_services:
logging.info('Starting queue runners.')
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True, start=True))
return threads
return sess
|
ctw-baseline
|
positive
|
def get_episode(self, batch_size: Optional[int]=None, truncate_episode_at: Optional[int]=None) -> Tuple[EnvStep, Union[np.ndarray, tf.Tensor]]:
(env_steps, valid_steps) = self._dataset.get_episode(batch_size, truncate_episode_at)
<DeepExtract>
randoms = tf.gather(self._random_numbers, self._dataset.last_rows_read)
num_perturbations = self._num_perturbations or 1
perturbations = tf.cast(randoms[..., None] * tf.pow(2.0, 1 + tf.range(num_perturbations, dtype=tf.float32)), tf.int64)
perturbations = tf.cast(tf.math.mod(perturbations, 2), env_steps.reward.dtype) - 0.5
new_reward = env_steps.reward[..., None] + self._perturbation_scale * perturbations
if self._num_perturbations is None:
new_reward = tf.squeeze(new_reward, -1)
new_discount = env_steps.discount
else:
new_discount = env_steps.discount[..., None]
perturbed_steps = env_steps.write(reward=new_reward, discount=new_discount)
</DeepExtract>
return (perturbed_steps, valid_steps)
|
def get_episode(self, batch_size: Optional[int]=None, truncate_episode_at: Optional[int]=None) -> Tuple[EnvStep, Union[np.ndarray, tf.Tensor]]:
(env_steps, valid_steps) = self._dataset.get_episode(batch_size, truncate_episode_at)
randoms = tf.gather(self._random_numbers, self._dataset.last_rows_read)
num_perturbations = self._num_perturbations or 1
perturbations = tf.cast(randoms[..., None] * tf.pow(2.0, 1 + tf.range(num_perturbations, dtype=tf.float32)), tf.int64)
perturbations = tf.cast(tf.math.mod(perturbations, 2), env_steps.reward.dtype) - 0.5
new_reward = env_steps.reward[..., None] + self._perturbation_scale * perturbations
if self._num_perturbations is None:
new_reward = tf.squeeze(new_reward, -1)
new_discount = env_steps.discount
else:
new_discount = env_steps.discount[..., None]
perturbed_steps = env_steps.write(reward=new_reward, discount=new_discount)
return (perturbed_steps, valid_steps)
|
dice_rl
|
positive
|
def step5(data):
"""Step 5 of GISTEMP.
This step takes input provided by steps 3 and 4 (zipped together).
The usual generator of the *data* argument is gio.step5_input()
and this allows for various missing and/or synthesized inputs,
allowing just-land, just-ocean, override-weights.
:Param data:
*data* should be an iterable of (weight, land, ocean) triples. The
first triple is metadata (and this is a hack). Subsequently
there is one triple per subbox (of which, 8000).
"""
<DeepExtract>
meta = data.next()
(maskmeta, landmeta, oceanmeta) = meta
if maskmeta:
yield meta
for t in data:
yield t
else:
meta = list(meta)
meta[0] = 'mask computed in Step 5'
yield tuple(meta)
for (_, land, ocean) in data:
if ocean.good_count < parameters.subbox_min_valid or land.d < parameters.subbox_land_range:
landmask = 1.0
else:
landmask = 0.0
yield (landmask, land, ocean)
</DeepExtract>
subboxes = gio.step5_mask_output(subboxes)
<DeepExtract>
meta = subboxes.next()
(land, ocean, mixed) = land_ocean_boxes(meta, subboxes)
</DeepExtract>
result = []
for (meta, boxes) in [land, ocean, mixed]:
boxes = gio.step5_bx_output(meta, boxes)
<DeepExtract>
iyrbeg = meta.yrbeg
monm = meta.monm
(boxes_in_band, band_in_zone) = zones()
bands = len(boxes_in_band)
lenz = [None] * bands
wt = [None] * bands
avg = [None] * bands
for band in range(bands):
box_series = [None] * boxes_in_band[band]
box_weights = [None] * boxes_in_band[band]
box_length = [None] * boxes_in_band[band]
for box in range(boxes_in_band[band]):
(box_series[box], box_weights[box], box_length[box], _) = boxes.next()
total_length = sum(box_length)
if total_length == 0:
wt[band] = [0.0] * monm
avg[band] = [MISSING] * monm
else:
(box_length, IORD) = sort_perm(box_length)
nr = IORD[0]
wt[band] = list(box_weights[nr])
avg[band] = list(box_series[nr])
for n in range(1, boxes_in_band[band]):
nr = IORD[n]
if box_length[n] == 0:
break
series.combine(avg[band], wt[band], box_series[nr], box_weights[nr], parameters.box_min_overlap)
series.anomalize(avg[band], parameters.box_reference_period, iyrbeg)
lenz[band] = sum((valid(a) for a in avg[band]))
yield (avg[band], wt[band])
try:
boxes.next()
assert 0, 'Too many boxes found'
except StopIteration:
pass
(lenz, iord) = sort_perm(lenz)
for zone in range(len(band_in_zone)):
for j1 in range(bands):
if iord[j1] in band_in_zone[zone]:
break
else:
raise Exception('No band in compound zone %d.' % zone)
band = iord[j1]
if lenz[band] == 0:
print('**** NO DATA FOR ZONE %d' % band)
wtg = list(wt[band])
avgg = list(avg[band])
for j in range(j1 + 1, bands):
band = iord[j]
if band not in band_in_zone[zone]:
continue
series.combine(avgg, wtg, avg[band], wt[band], parameters.box_min_overlap)
series.anomalize(avgg, parameters.box_reference_period, iyrbeg)
yield (avgg, wtg)
</DeepExtract>
result.append(annzon(meta, zoned_averages))
return result
|
def step5(data):
"""Step 5 of GISTEMP.
This step takes input provided by steps 3 and 4 (zipped together).
The usual generator of the *data* argument is gio.step5_input()
and this allows for various missing and/or synthesized inputs,
allowing just-land, just-ocean, override-weights.
:Param data:
*data* should be an iterable of (weight, land, ocean) triples. The
first triple is metadata (and this is a hack). Subsequently
there is one triple per subbox (of which, 8000).
"""
meta = data.next()
(maskmeta, landmeta, oceanmeta) = meta
if maskmeta:
yield meta
for t in data:
yield t
else:
meta = list(meta)
meta[0] = 'mask computed in Step 5'
yield tuple(meta)
for (_, land, ocean) in data:
if ocean.good_count < parameters.subbox_min_valid or land.d < parameters.subbox_land_range:
landmask = 1.0
else:
landmask = 0.0
yield (landmask, land, ocean)
subboxes = gio.step5_mask_output(subboxes)
meta = subboxes.next()
(land, ocean, mixed) = land_ocean_boxes(meta, subboxes)
result = []
for (meta, boxes) in [land, ocean, mixed]:
boxes = gio.step5_bx_output(meta, boxes)
iyrbeg = meta.yrbeg
monm = meta.monm
(boxes_in_band, band_in_zone) = zones()
bands = len(boxes_in_band)
lenz = [None] * bands
wt = [None] * bands
avg = [None] * bands
for band in range(bands):
box_series = [None] * boxes_in_band[band]
box_weights = [None] * boxes_in_band[band]
box_length = [None] * boxes_in_band[band]
for box in range(boxes_in_band[band]):
(box_series[box], box_weights[box], box_length[box], _) = boxes.next()
total_length = sum(box_length)
if total_length == 0:
wt[band] = [0.0] * monm
avg[band] = [MISSING] * monm
else:
(box_length, IORD) = sort_perm(box_length)
nr = IORD[0]
wt[band] = list(box_weights[nr])
avg[band] = list(box_series[nr])
for n in range(1, boxes_in_band[band]):
nr = IORD[n]
if box_length[n] == 0:
break
series.combine(avg[band], wt[band], box_series[nr], box_weights[nr], parameters.box_min_overlap)
series.anomalize(avg[band], parameters.box_reference_period, iyrbeg)
lenz[band] = sum((valid(a) for a in avg[band]))
yield (avg[band], wt[band])
try:
boxes.next()
assert 0, 'Too many boxes found'
except StopIteration:
pass
(lenz, iord) = sort_perm(lenz)
for zone in range(len(band_in_zone)):
for j1 in range(bands):
if iord[j1] in band_in_zone[zone]:
break
else:
raise Exception('No band in compound zone %d.' % zone)
band = iord[j1]
if lenz[band] == 0:
print('**** NO DATA FOR ZONE %d' % band)
wtg = list(wt[band])
avgg = list(avg[band])
for j in range(j1 + 1, bands):
band = iord[j]
if band not in band_in_zone[zone]:
continue
series.combine(avgg, wtg, avg[band], wt[band], parameters.box_min_overlap)
series.anomalize(avgg, parameters.box_reference_period, iyrbeg)
yield (avgg, wtg)
result.append(annzon(meta, zoned_averages))
return result
|
ccc-gistemp
|
positive
|
def rename_files(in_caps_dwi: str, mapping: dict) -> tuple:
"""Rename files provided.
The new files are symbolic links to old files.
    For this reason, the old files still exist after renaming.
Parameters
----------
in_caps_dwi : str
A DWI file from the CAPS folder.
This is used only to extract the BIDS identifier.
mapping : dict
Mapping between original file names and suffixes for
new file names.
Returns
-------
tuple :
New file names.
"""
import os
from nipype.interfaces.utility import Rename
from nipype.utils.filemanip import split_filename
<DeepExtract>
import re
m = re.search('(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+).*_dwi', in_caps_dwi)
if not m:
raise ValueError(f'Could not extract the BIDS identifier from the DWI input filename {in_caps_dwi}.')
bids_id = m.group(0)
</DeepExtract>
renamed_files = []
for (original_file, suffix) in mapping.items():
(base_dir, _, _) = split_filename(original_file)
rename = Rename()
rename.inputs.in_file = original_file
rename.inputs.format_string = os.path.join(base_dir, f'{bids_id}{suffix}')
renamed_files.append(rename.run().outputs.out_file)
return tuple(renamed_files)
|
def rename_files(in_caps_dwi: str, mapping: dict) -> tuple:
"""Rename files provided.
The new files are symbolic links to old files.
    For this reason, the old files still exist after renaming.
Parameters
----------
in_caps_dwi : str
A DWI file from the CAPS folder.
This is used only to extract the BIDS identifier.
mapping : dict
Mapping between original file names and suffixes for
new file names.
Returns
-------
tuple :
New file names.
"""
import os
from nipype.interfaces.utility import Rename
from nipype.utils.filemanip import split_filename
import re
m = re.search('(sub-[a-zA-Z0-9]+)_(ses-[a-zA-Z0-9]+).*_dwi', in_caps_dwi)
if not m:
raise ValueError(f'Could not extract the BIDS identifier from the DWI input filename {in_caps_dwi}.')
bids_id = m.group(0)
renamed_files = []
for (original_file, suffix) in mapping.items():
(base_dir, _, _) = split_filename(original_file)
rename = Rename()
rename.inputs.in_file = original_file
rename.inputs.format_string = os.path.join(base_dir, f'{bids_id}{suffix}')
renamed_files.append(rename.run().outputs.out_file)
return tuple(renamed_files)
|
clinica
|
positive
|
def get_brute_force_protection(self):
"""Get the brute force configuration.
Returns the brute force configuration.
See: https://auth0.com/docs/api/management/v2#!/Attack_Protection/get_brute_force_protection
"""
<DeepExtract>
url = '{}://{}/api/v2/attack-protection/{}'.format(self.protocol, self.domain, 'brute-force-protection')
</DeepExtract>
return self.client.get(url)
|
def get_brute_force_protection(self):
"""Get the brute force configuration.
Returns the brute force configuration.
See: https://auth0.com/docs/api/management/v2#!/Attack_Protection/get_brute_force_protection
"""
url = '{}://{}/api/v2/attack-protection/{}'.format(self.protocol, self.domain, 'brute-force-protection')
return self.client.get(url)
|
auth0-python
|
positive
|
def get_blog_posts():
pageToken = None
posts = []
for i in itertools.count():
<DeepExtract>
params = {'orderBy': 'updated', 'fields': 'items(author/displayName,content,id,title,updated,url,labels),nextPageToken', 'key': api_key, 'maxResults': 50, 'fetchBodies': str(fetch_all).lower()}
if pageToken is not None:
params['pageToken'] = pageToken
j = query_blogger_api('%s/posts' % api_url, params)
</DeepExtract>
print('Fetched page %i (%i posts)...' % (i, len(j['items'])))
posts.extend(j['items'])
if 'nextPageToken' not in j:
break
pageToken = j['nextPageToken']
return posts
|
def get_blog_posts():
pageToken = None
posts = []
for i in itertools.count():
params = {'orderBy': 'updated', 'fields': 'items(author/displayName,content,id,title,updated,url,labels),nextPageToken', 'key': api_key, 'maxResults': 50, 'fetchBodies': str(fetch_all).lower()}
if pageToken is not None:
params['pageToken'] = pageToken
j = query_blogger_api('%s/posts' % api_url, params)
print('Fetched page %i (%i posts)...' % (i, len(j['items'])))
posts.extend(j['items'])
if 'nextPageToken' not in j:
break
pageToken = j['nextPageToken']
return posts
|
3d-fixes
|
positive
|
def main():
arguments = docopt(__doc__)
debug = arguments['--debug']
verbose = arguments['--verbose']
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
if verbose:
ch.setLevel(logging.INFO)
if debug:
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logger.info('{}{}'.format(ciftify.utils.ciftify_logo(), ciftify.utils.section_header('Starting ciftify_meants')))
ciftify.utils.log_arguments(arguments)
settings = UserSettings(arguments)
<DeepExtract>
if '.dlabel.nii' in settings.seed.path:
if settings.weighted:
            logger.error('--weighted mean time-series cannot be calculated with a .dlabel.nii seed. Exiting.')
sys.exit(1)
if settings.roi_label:
logger.error("Sorry, --roi-label option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if settings.mask:
logger.error("Sorry, --mask option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if not settings.func.type == 'cifti':
logger.error('If <seed> is .dlabel.nii, the <func> needs to be a cifti file. Exiting.')
sys.exit(1)
cifti_parcellate_to_meants(settings)
else:
_ = ciftify.meants.calc_meants_with_numpy(settings, outputlabels=settings.outputlabels)
</DeepExtract>
logger.info(ciftify.utils.section_header('Done ciftify_meants'))
sys.exit(ret)
|
def main():
arguments = docopt(__doc__)
debug = arguments['--debug']
verbose = arguments['--verbose']
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
if verbose:
ch.setLevel(logging.INFO)
if debug:
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
logger.info('{}{}'.format(ciftify.utils.ciftify_logo(), ciftify.utils.section_header('Starting ciftify_meants')))
ciftify.utils.log_arguments(arguments)
settings = UserSettings(arguments)
if '.dlabel.nii' in settings.seed.path:
if settings.weighted:
            logger.error('--weighted mean time-series cannot be calculated with a .dlabel.nii seed. Exiting.')
sys.exit(1)
if settings.roi_label:
logger.error("Sorry, --roi-label option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if settings.mask:
logger.error("Sorry, --mask option doesn't work for .dlabel.nii seed inputs. Exiting.")
sys.exit(1)
if not settings.func.type == 'cifti':
logger.error('If <seed> is .dlabel.nii, the <func> needs to be a cifti file. Exiting.')
sys.exit(1)
cifti_parcellate_to_meants(settings)
else:
_ = ciftify.meants.calc_meants_with_numpy(settings, outputlabels=settings.outputlabels)
logger.info(ciftify.utils.section_header('Done ciftify_meants'))
sys.exit(ret)
|
ciftify
|
positive
|
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
<DeepExtract>
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
</DeepExtract>
<DeepExtract>
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args(['--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir])
preprocess.main(preprocess_args)
</DeepExtract>
<DeepExtract>
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(train_parser, ['--task', 'translation', data_dir, '--save-dir', data_dir, '--arch', 'fconv_iwslt_de_en', '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--source-lang', 'in', '--target-lang', 'out'] + (extra_flags or []))
train.main(train_args)
</DeepExtract>
<DeepExtract>
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(generate_parser, [data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar'])
generate.main(generate_args)
generate_args.buffer_size = 0
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
</DeepExtract>
|
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset + ex_len]))
print(ex_str, file=h)
offset += ex_len
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
preprocess_parser = preprocess.get_parser()
preprocess_args = preprocess_parser.parse_args(['--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir])
preprocess.main(preprocess_args)
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(train_parser, ['--task', 'translation', data_dir, '--save-dir', data_dir, '--arch', 'fconv_iwslt_de_en', '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--source-lang', 'in', '--target-lang', 'out'] + (extra_flags or []))
train.main(train_args)
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(generate_parser, [data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar'])
generate.main(generate_args)
generate_args.buffer_size = 0
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
|
dlcl
|
positive
|
def test_register_as_callable(self):
""" ensure register can be used as a callable to take a name
and a normxcorr func """
<DeepExtract>
def func(templates, stream, pads, *args, **kwargs):
pass
func.__name__ = str('funky')
func = func
</DeepExtract>
register_array_xcorr(name='func3', func=func)
assert self.name_func_is_registered('func3')
|
def test_register_as_callable(self):
""" ensure register can be used as a callable to take a name
and a normxcorr func """
def func(templates, stream, pads, *args, **kwargs):
pass
func.__name__ = str('funky')
func = func
register_array_xcorr(name='func3', func=func)
assert self.name_func_is_registered('func3')
|
EQcorrscan
|
positive
|
def pressure_ashpa(self):
"""
Reads the atmospheric pressure
"""
hpa = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_pressure = None
raw_temperature = None
value_d_p1 = None
value_d_p2 = None
value_d_p3 = None
value_d_p4 = None
value_d_p5 = None
value_d_p6 = None
value_d_p7 = None
value_d_p8 = None
value_d_p9 = None
value_lsb = None
value_msb = None
value_xlsb = None
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSUREMSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_msb = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSURELSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_lsb = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSUREXLSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_xlsb = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP1
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
value_d_p1 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP2
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p2 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP3
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p3 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP4
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p4 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP5
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p5 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP6
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p6 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP7
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p7 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP8
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p8 = val
</DeepExtract>
<DeepExtract>
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP9
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p9 = val
</DeepExtract>
<DeepExtract>
celsius = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_temp = None
value_d_t1 = None
value_d_t2 = None
value_d_t3 = None
value_lsb = None
value_msb = None
value_xlsb = None
value_msb = self.get_tempmsb()
value_lsb = self.get_templsb()
value_xlsb = self.get_tempxlsb()
value_d_t1 = self.get_digt1()
value_d_t2 = self.get_digt2()
value_d_t3 = self.get_digt3()
raw_temp = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = (raw_temp / 16384.0 - value_d_t1 / 1024.0) * value_d_t2
raw_comp3 = raw_temp / 131072.0 - value_d_t1 / 8192.0
raw_comp2 = raw_comp3 * raw_comp3 * value_d_t3
celsius = (raw_comp1 + raw_comp2) / 5120.0
raw_temperature = celsius
</DeepExtract>
raw_temperature = raw_temperature * 5120.0
raw_pressure = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = raw_temperature / 2 - 64000.0
raw_comp2 = raw_comp1 * raw_comp1 * value_d_p6 / 32768.0
raw_comp2 = raw_comp2 + raw_comp1 * value_d_p5 * 2.0
raw_comp2 = raw_comp2 / 4.0 + value_d_p4 * 65536.0
raw_comp3 = value_d_p3 * raw_comp1 * raw_comp1
raw_comp1 = (raw_comp3 / 524288.0 + value_d_p2 * raw_comp1) / 524288.0
raw_comp1 = (1.0 + raw_comp1 / 32768.0) * value_d_p1
hpa = 1048576.0 - raw_pressure
hpa = (hpa - raw_comp2 / 4096.0) * (6250.0 / raw_comp1)
raw_comp1 = value_d_p9 * hpa * hpa / 2147483648.0
raw_comp2 = hpa * value_d_p8 / 32768.0
hpa = hpa + (raw_comp1 + raw_comp2 + value_d_p7) / 16.0
hpa = hpa / 100.0
return hpa
|
def pressure_ashpa(self):
"""
Reads the atmospheric pressure
"""
hpa = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_pressure = None
raw_temperature = None
value_d_p1 = None
value_d_p2 = None
value_d_p3 = None
value_d_p4 = None
value_d_p5 = None
value_d_p6 = None
value_d_p7 = None
value_d_p8 = None
value_d_p9 = None
value_lsb = None
value_msb = None
value_xlsb = None
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSUREMSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_msb = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSURELSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_lsb = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_PRESSUREXLSB
read_list = bytearray(1)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
value_xlsb = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP1
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
value_d_p1 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP2
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p2 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP3
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p3 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP4
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p4 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP5
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p5 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP6
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p6 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP7
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p7 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP8
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p8 = val
write_list = bytearray(1)
write_list[0] = self.REGISTER_DIGP9
read_list = bytearray(2)
with self.i2c_device as i2c:
i2c.write_then_readinto(write_list, read_list)
val = 0
val = val << 8 | read_list[0]
val = val << 8 | read_list[1]
val = _sign(val, 16)
value_d_p9 = val
celsius = None
raw_comp1 = None
raw_comp2 = None
raw_comp3 = None
raw_temp = None
value_d_t1 = None
value_d_t2 = None
value_d_t3 = None
value_lsb = None
value_msb = None
value_xlsb = None
value_msb = self.get_tempmsb()
value_lsb = self.get_templsb()
value_xlsb = self.get_tempxlsb()
value_d_t1 = self.get_digt1()
value_d_t2 = self.get_digt2()
value_d_t3 = self.get_digt3()
raw_temp = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = (raw_temp / 16384.0 - value_d_t1 / 1024.0) * value_d_t2
raw_comp3 = raw_temp / 131072.0 - value_d_t1 / 8192.0
raw_comp2 = raw_comp3 * raw_comp3 * value_d_t3
celsius = (raw_comp1 + raw_comp2) / 5120.0
raw_temperature = celsius
raw_temperature = raw_temperature * 5120.0
raw_pressure = (value_msb << 12) + (value_lsb << 4) + (value_xlsb >> 4)
raw_comp1 = raw_temperature / 2 - 64000.0
raw_comp2 = raw_comp1 * raw_comp1 * value_d_p6 / 32768.0
raw_comp2 = raw_comp2 + raw_comp1 * value_d_p5 * 2.0
raw_comp2 = raw_comp2 / 4.0 + value_d_p4 * 65536.0
raw_comp3 = value_d_p3 * raw_comp1 * raw_comp1
raw_comp1 = (raw_comp3 / 524288.0 + value_d_p2 * raw_comp1) / 524288.0
raw_comp1 = (1.0 + raw_comp1 / 32768.0) * value_d_p1
hpa = 1048576.0 - raw_pressure
hpa = (hpa - raw_comp2 / 4096.0) * (6250.0 / raw_comp1)
raw_comp1 = value_d_p9 * hpa * hpa / 2147483648.0
raw_comp2 = hpa * value_d_p8 / 32768.0
hpa = hpa + (raw_comp1 + raw_comp2 + value_d_p7) / 16.0
hpa = hpa / 100.0
return hpa
|
cyanobyte
|
positive
|
def get_metrics(experiment_metrics, experiment_metrics_bag_filename, map_waypoints, experiment_metrics_filename):
time_counter = 5
while not os.path.exists(experiment_metrics_bag_filename):
time.sleep(1)
time_counter -= 1
if time_counter <= 0:
ValueError(f"{experiment_metrics_bag_filename} isn't a file!")
return {}
try:
bag_reader = bagreader(experiment_metrics_bag_filename)
except rosbag.bag.ROSBagException:
return {}
csv_files = []
for topic in bag_reader.topics:
data = bag_reader.message_by_topic(topic)
csv_files.append(data)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-odometry.csv'
dataframe_pose = pd.read_csv(data_file)
checkpoints = []
for (index, row) in dataframe_pose.iterrows():
checkpoints.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/clock.csv'
dataframe_clock = pd.read_csv(data_file)
clock_points = []
for (index, row) in dataframe_clock.iterrows():
clock_points.append(row)
start_clock = clock_points[0]
seconds_start = start_clock['clock.secs']
seconds_end = clock_points[len(clock_points) - 1]['clock.secs']
collision_points = []
if '/carla/ego_vehicle/collision' in bag_reader.topics:
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-collision.csv'
dataframe_collision = pd.read_csv(data_file)
for (index, row) in dataframe_collision.iterrows():
collision_points.append(row)
lane_invasion_points = []
if '/carla/ego_vehicle/lane_invasion' in bag_reader.topics:
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-lane_invasion.csv'
dataframe_lane_invasion = pd.read_csv(data_file, on_bad_lines='skip')
for (index, row) in dataframe_lane_invasion.iterrows():
lane_invasion_points.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-speedometer.csv'
dataframe_speedometer = pd.read_csv(data_file)
speedometer_points = []
for (index, row) in dataframe_speedometer.iterrows():
speedometer_points.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-vehicle_status.csv'
dataframe_vehicle_status = pd.read_csv(data_file)
vehicle_status_points = []
for (index, row) in dataframe_vehicle_status.iterrows():
vehicle_status_points.append(row)
if len(checkpoints) > 1:
starting_point = checkpoints[0]
starting_point = (starting_point['pose.pose.position.x'], starting_point['pose.pose.position.y'])
experiment_metrics['starting_point'] = starting_point
<DeepExtract>
end_point = checkpoints[len(checkpoints) - 1]
experiment_metrics['completed_distance'] = circuit_distance_completed(checkpoints, end_point)
experiment_metrics = experiment_metrics
</DeepExtract>
<DeepExtract>
previous_speed = 0
speedometer_points_sum = 0
suddenness_distance_speeds = []
speed_points = []
for point in speedometer_points:
speed_point = point.data * 3.6
speedometer_points_sum += speed_point
a = np.array(speed_point)
b = np.array(previous_speed)
suddenness_distance_speed = np.linalg.norm(a - b)
suddenness_distance_speeds.append(suddenness_distance_speed)
previous_speed = speed_point
speed_points.append(speed_point)
experiment_metrics['average_speed'] = speedometer_points_sum / len(speedometer_points)
suddenness_distance_speed = sum(suddenness_distance_speeds) / len(suddenness_distance_speeds)
experiment_metrics['suddenness_distance_speed'] = suddenness_distance_speed
experiment_metrics['max_speed'] = max(speed_points)
experiment_metrics['min_speed'] = min(speed_points)
experiment_metrics = experiment_metrics
</DeepExtract>
<DeepExtract>
previous_commanded_throttle = 0
previous_commanded_steer = 0
previous_commanded_brake = 0
suddenness_distance_control_commands = []
suddenness_distance_throttle = []
suddenness_distance_steer = []
suddenness_distance_brake_command = []
for point in vehicle_status_points:
throttle = point['control.throttle']
steer = point['control.steer']
brake_command = point['control.brake']
a = np.array((throttle, steer, brake_command))
b = np.array((previous_commanded_throttle, previous_commanded_steer, previous_commanded_brake))
distance = np.linalg.norm(a - b)
suddenness_distance_control_commands.append(distance)
a = np.array(throttle)
b = np.array(previous_commanded_throttle)
distance_throttle = np.linalg.norm(a - b)
suddenness_distance_throttle.append(distance_throttle)
a = np.array(steer)
b = np.array(previous_commanded_steer)
distance_steer = np.linalg.norm(a - b)
suddenness_distance_steer.append(distance_steer)
a = np.array(brake_command)
b = np.array(previous_commanded_brake)
distance_brake_command = np.linalg.norm(a - b)
suddenness_distance_brake_command.append(distance_brake_command)
previous_commanded_throttle = throttle
previous_commanded_steer = steer
previous_commanded_brake = brake_command
experiment_metrics['suddenness_distance_control_commands'] = sum(suddenness_distance_control_commands) / len(suddenness_distance_control_commands)
experiment_metrics['suddenness_distance_throttle'] = sum(suddenness_distance_throttle) / len(suddenness_distance_throttle)
experiment_metrics['suddenness_distance_steer'] = sum(suddenness_distance_steer) / len(suddenness_distance_steer)
experiment_metrics['suddenness_distance_brake_command'] = sum(suddenness_distance_brake_command) / len(suddenness_distance_brake_command)
experiment_metrics = experiment_metrics
</DeepExtract>
<DeepExtract>
collisions_checkpoints = []
collisions_checkpoints_different = []
(previous_collisions_checkpoints_x, previous_collisions_checkpoints_y) = (0, 0)
for point in collision_points:
collision_point = dataframe_pose.loc[dataframe_pose['Time'] == point['Time']]
collisions_checkpoints.append(collision_point)
point_1 = np.array([collision_point.iloc[0]['pose.pose.position.x'], collision_point.iloc[0]['pose.pose.position.y']])
point_2 = np.array([previous_collisions_checkpoints_x, previous_collisions_checkpoints_y])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist > 1:
collisions_checkpoints_different.append(collision_point)
(previous_collisions_checkpoints_x, previous_collisions_checkpoints_y) = (collision_point.iloc[0]['pose.pose.position.x'], collision_point.iloc[0]['pose.pose.position.y'])
experiment_metrics['collisions'] = len(collisions_checkpoints_different)
(experiment_metrics, collisions_checkpoints) = (experiment_metrics, collisions_checkpoints)
</DeepExtract>
<DeepExtract>
lane_invasion_checkpoints = []
lane_invasion_checkpoints_different = []
(previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y) = (0, 0)
previous_time = 0
for point in lane_invasion_points:
lane_invasion_point = dataframe_pose.loc[dataframe_pose['Time'] == point['Time']]
lane_invasion_checkpoints.append(lane_invasion_point)
point_1 = np.array([lane_invasion_point.iloc[0]['pose.pose.position.x'], lane_invasion_point.iloc[0]['pose.pose.position.y']])
point_2 = np.array([previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist > 1 and point['Time'] - previous_time > 0.5:
lane_invasion_checkpoints_different.append(lane_invasion_point)
previous_time = point['Time']
(previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y) = (lane_invasion_point.iloc[0]['pose.pose.position.x'], lane_invasion_point.iloc[0]['pose.pose.position.y'])
experiment_metrics['lane_invasions'] = len(lane_invasion_checkpoints_different)
(experiment_metrics, lane_invasion_checkpoints) = (experiment_metrics, lane_invasion_checkpoints)
</DeepExtract>
experiment_metrics['experiment_total_simulated_time'] = seconds_end - seconds_start
if 'bird_eye_view_images' in experiment_metrics:
experiment_metrics['bird_eye_view_images_per_second'] = experiment_metrics['bird_eye_view_images'] / experiment_metrics['experiment_total_simulated_time']
experiment_metrics['bird_eye_view_unique_images_per_second'] = experiment_metrics['bird_eye_view_unique_images'] / experiment_metrics['experiment_total_simulated_time']
<DeepExtract>
map_waypoints_tuples = []
map_waypoints_tuples_x = []
map_waypoints_tuples_y = []
for waypoint in map_waypoints:
if experiment_metrics['carla_map'] == 'Carla/Maps/Town04' or experiment_metrics['carla_map'] == 'Carla/Maps/Town04_Opt':
map_waypoints_tuples_x.append(-waypoint.transform.location.x)
map_waypoints_tuples_y.append(waypoint.transform.location.y)
map_waypoints_tuples.append((-waypoint.transform.location.x, waypoint.transform.location.y))
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town06' or experiment_metrics['carla_map'] == 'Carla/Maps/Town06_Opt':
map_waypoints_tuples_x.append(waypoint.transform.location.x)
map_waypoints_tuples_y.append(-waypoint.transform.location.y)
map_waypoints_tuples.append((waypoint.transform.location.x, -waypoint.transform.location.y))
else:
map_waypoints_tuples_x.append(waypoint.transform.location.x)
map_waypoints_tuples_y.append(waypoint.transform.location.y)
map_waypoints_tuples.append((waypoint.transform.location.x, waypoint.transform.location.y))
checkpoints_tuples = []
checkpoints_tuples_x = []
checkpoints_tuples_y = []
checkpoints_speeds = []
for (i, point) in enumerate(checkpoints):
current_checkpoint = np.array([point['pose.pose.position.x'], point['pose.pose.position.y'], speedometer_points[i]['data'] * 3.6])
if experiment_metrics['carla_map'] == 'Carla/Maps/Town01' or experiment_metrics['carla_map'] == 'Carla/Maps/Town02' or experiment_metrics['carla_map'] == 'Carla/Maps/Town01_Opt' or (experiment_metrics['carla_map'] == 'Carla/Maps/Town02_Opt'):
checkpoint_x = max(map_waypoints_tuples_x) + min(map_waypoints_tuples_x) - current_checkpoint[0]
checkpoint_y = -point['pose.pose.position.y']
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town03' or experiment_metrics['carla_map'] == 'Carla/Maps/Town07' or experiment_metrics['carla_map'] == 'Carla/Maps/Town03_Opt' or (experiment_metrics['carla_map'] == 'Carla/Maps/Town07_Opt'):
checkpoint_x = current_checkpoint[0]
checkpoint_y = -current_checkpoint[1]
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town04' or experiment_metrics['carla_map'] == 'Carla/Maps/Town04_Opt':
checkpoint_x = -current_checkpoint[0]
checkpoint_y = -current_checkpoint[1]
else:
checkpoint_x = current_checkpoint[0]
checkpoint_y = current_checkpoint[1]
checkpoints_tuples_x.append(checkpoint_x)
checkpoints_tuples_y.append(checkpoint_y)
checkpoints_speeds.append(current_checkpoint[2])
checkpoints_tuples.append((checkpoint_x, checkpoint_y, current_checkpoint[2]))
min_dists = []
best_checkpoint_points_x = []
best_checkpoint_points_y = []
covered_checkpoints = []
for (error_counter, checkpoint) in enumerate(checkpoints_tuples):
min_dist = 100
for (x, perfect_checkpoint) in enumerate(map_waypoints_tuples):
point_1 = np.array([checkpoint[0], checkpoint[1]])
point_2 = np.array([perfect_checkpoint[0], perfect_checkpoint[1]])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist < min_dist:
min_dist = dist
best_checkpoint = x
best_checkpoint_point_x = point_2[0]
best_checkpoint_point_y = point_2[1]
best_checkpoint_points_x.append(best_checkpoint_point_x)
best_checkpoint_points_y.append(best_checkpoint_point_y)
if min_dist < 100:
min_dists.append(min_dist)
if len(covered_checkpoints) == 0 or (len(covered_checkpoints) > 0 and covered_checkpoints[len(covered_checkpoints) - 1][0] != best_checkpoint_point_x and (covered_checkpoints[len(covered_checkpoints) - 1][1] != best_checkpoint_point_y)):
if min_dist < 1:
covered_checkpoints.append((best_checkpoint_point_x, best_checkpoint_point_y))
experiment_metrics['effective_completed_distance'] = len(covered_checkpoints) * 0.5
experiment_metrics['position_deviation_mean'] = sum(min_dists) / len(min_dists)
experiment_metrics['position_deviation_total_err'] = sum(min_dists)
experiment_metrics['position_deviation_mean_per_km'] = experiment_metrics['position_deviation_mean'] / (experiment_metrics['effective_completed_distance'] / 1000)
starting_point_map = (checkpoints_tuples_x[0], checkpoints_tuples_y[0])
experiment_metrics['starting_point_map'] = starting_point_map
if experiment_metrics['collisions'] > 0:
experiment_metrics['collisions_per_km'] = experiment_metrics['collisions'] / (experiment_metrics['effective_completed_distance'] / 1000)
else:
experiment_metrics['collisions_per_km'] = 0
if experiment_metrics['lane_invasions'] > 0:
experiment_metrics['lane_invasions_per_km'] = experiment_metrics['lane_invasions'] / (experiment_metrics['effective_completed_distance'] / 1000)
else:
experiment_metrics['lane_invasions_per_km'] = 0
experiment_metrics['suddenness_distance_control_command_per_km'] = experiment_metrics['suddenness_distance_control_commands'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_throttle_per_km'] = experiment_metrics['suddenness_distance_throttle'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_steer_per_km'] = experiment_metrics['suddenness_distance_steer'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_brake_command_per_km'] = experiment_metrics['suddenness_distance_brake_command'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_speed_per_km'] = experiment_metrics['suddenness_distance_speed'] / (experiment_metrics['effective_completed_distance'] / 1000)
create_experiment_maps(experiment_metrics, experiment_metrics_filename, map_waypoints_tuples_x, map_waypoints_tuples_y, best_checkpoint_points_x, best_checkpoint_points_y, checkpoints_tuples_x, checkpoints_tuples_y, checkpoints_speeds, collisions_checkpoints, lane_invasion_checkpoints)
experiment_metrics = experiment_metrics
</DeepExtract>
<DeepExtract>
points_to_start_count = 50
completed_laps = 0
for (x, checkpoint) in enumerate(checkpoints):
if points_to_start_count > 0:
points_to_start_count -= 1
else:
point_1 = np.array([checkpoint['pose.pose.position.x'], checkpoint['pose.pose.position.y']])
point_2 = np.array([starting_point[0], starting_point[1]])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist < 0.5:
completed_laps += 1
points_to_start_count = 50
experiment_metrics['completed_laps'] = completed_laps
</DeepExtract>
shutil.rmtree(experiment_metrics_bag_filename.split('.bag')[0])
return experiment_metrics
else:
return {}
|
def get_metrics(experiment_metrics, experiment_metrics_bag_filename, map_waypoints, experiment_metrics_filename):
time_counter = 5
while not os.path.exists(experiment_metrics_bag_filename):
time.sleep(1)
time_counter -= 1
if time_counter <= 0:
ValueError(f"{experiment_metrics_bag_filename} isn't a file!")
return {}
try:
bag_reader = bagreader(experiment_metrics_bag_filename)
except rosbag.bag.ROSBagException:
return {}
csv_files = []
for topic in bag_reader.topics:
data = bag_reader.message_by_topic(topic)
csv_files.append(data)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-odometry.csv'
dataframe_pose = pd.read_csv(data_file)
checkpoints = []
for (index, row) in dataframe_pose.iterrows():
checkpoints.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/clock.csv'
dataframe_clock = pd.read_csv(data_file)
clock_points = []
for (index, row) in dataframe_clock.iterrows():
clock_points.append(row)
start_clock = clock_points[0]
seconds_start = start_clock['clock.secs']
seconds_end = clock_points[len(clock_points) - 1]['clock.secs']
collision_points = []
if '/carla/ego_vehicle/collision' in bag_reader.topics:
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-collision.csv'
dataframe_collision = pd.read_csv(data_file)
for (index, row) in dataframe_collision.iterrows():
collision_points.append(row)
lane_invasion_points = []
if '/carla/ego_vehicle/lane_invasion' in bag_reader.topics:
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-lane_invasion.csv'
dataframe_lane_invasion = pd.read_csv(data_file, on_bad_lines='skip')
for (index, row) in dataframe_lane_invasion.iterrows():
lane_invasion_points.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-speedometer.csv'
dataframe_speedometer = pd.read_csv(data_file)
speedometer_points = []
for (index, row) in dataframe_speedometer.iterrows():
speedometer_points.append(row)
data_file = experiment_metrics_bag_filename.split('.bag')[0] + '/carla-ego_vehicle-vehicle_status.csv'
dataframe_vehicle_status = pd.read_csv(data_file)
vehicle_status_points = []
for (index, row) in dataframe_vehicle_status.iterrows():
vehicle_status_points.append(row)
if len(checkpoints) > 1:
starting_point = checkpoints[0]
starting_point = (starting_point['pose.pose.position.x'], starting_point['pose.pose.position.y'])
experiment_metrics['starting_point'] = starting_point
end_point = checkpoints[len(checkpoints) - 1]
experiment_metrics['completed_distance'] = circuit_distance_completed(checkpoints, end_point)
experiment_metrics = experiment_metrics
previous_speed = 0
speedometer_points_sum = 0
suddenness_distance_speeds = []
speed_points = []
for point in speedometer_points:
speed_point = point.data * 3.6
speedometer_points_sum += speed_point
a = np.array(speed_point)
b = np.array(previous_speed)
suddenness_distance_speed = np.linalg.norm(a - b)
suddenness_distance_speeds.append(suddenness_distance_speed)
previous_speed = speed_point
speed_points.append(speed_point)
experiment_metrics['average_speed'] = speedometer_points_sum / len(speedometer_points)
suddenness_distance_speed = sum(suddenness_distance_speeds) / len(suddenness_distance_speeds)
experiment_metrics['suddenness_distance_speed'] = suddenness_distance_speed
experiment_metrics['max_speed'] = max(speed_points)
experiment_metrics['min_speed'] = min(speed_points)
experiment_metrics = experiment_metrics
previous_commanded_throttle = 0
previous_commanded_steer = 0
previous_commanded_brake = 0
suddenness_distance_control_commands = []
suddenness_distance_throttle = []
suddenness_distance_steer = []
suddenness_distance_brake_command = []
for point in vehicle_status_points:
throttle = point['control.throttle']
steer = point['control.steer']
brake_command = point['control.brake']
a = np.array((throttle, steer, brake_command))
b = np.array((previous_commanded_throttle, previous_commanded_steer, previous_commanded_brake))
distance = np.linalg.norm(a - b)
suddenness_distance_control_commands.append(distance)
a = np.array(throttle)
b = np.array(previous_commanded_throttle)
distance_throttle = np.linalg.norm(a - b)
suddenness_distance_throttle.append(distance_throttle)
a = np.array(steer)
b = np.array(previous_commanded_steer)
distance_steer = np.linalg.norm(a - b)
suddenness_distance_steer.append(distance_steer)
a = np.array(brake_command)
b = np.array(previous_commanded_brake)
distance_brake_command = np.linalg.norm(a - b)
suddenness_distance_brake_command.append(distance_brake_command)
previous_commanded_throttle = throttle
previous_commanded_steer = steer
previous_commanded_brake = brake_command
experiment_metrics['suddenness_distance_control_commands'] = sum(suddenness_distance_control_commands) / len(suddenness_distance_control_commands)
experiment_metrics['suddenness_distance_throttle'] = sum(suddenness_distance_throttle) / len(suddenness_distance_throttle)
experiment_metrics['suddenness_distance_steer'] = sum(suddenness_distance_steer) / len(suddenness_distance_steer)
experiment_metrics['suddenness_distance_brake_command'] = sum(suddenness_distance_brake_command) / len(suddenness_distance_brake_command)
experiment_metrics = experiment_metrics
collisions_checkpoints = []
collisions_checkpoints_different = []
(previous_collisions_checkpoints_x, previous_collisions_checkpoints_y) = (0, 0)
for point in collision_points:
collision_point = dataframe_pose.loc[dataframe_pose['Time'] == point['Time']]
collisions_checkpoints.append(collision_point)
point_1 = np.array([collision_point.iloc[0]['pose.pose.position.x'], collision_point.iloc[0]['pose.pose.position.y']])
point_2 = np.array([previous_collisions_checkpoints_x, previous_collisions_checkpoints_y])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist > 1:
collisions_checkpoints_different.append(collision_point)
(previous_collisions_checkpoints_x, previous_collisions_checkpoints_y) = (collision_point.iloc[0]['pose.pose.position.x'], collision_point.iloc[0]['pose.pose.position.y'])
experiment_metrics['collisions'] = len(collisions_checkpoints_different)
(experiment_metrics, collisions_checkpoints) = (experiment_metrics, collisions_checkpoints)
lane_invasion_checkpoints = []
lane_invasion_checkpoints_different = []
(previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y) = (0, 0)
previous_time = 0
for point in lane_invasion_points:
lane_invasion_point = dataframe_pose.loc[dataframe_pose['Time'] == point['Time']]
lane_invasion_checkpoints.append(lane_invasion_point)
point_1 = np.array([lane_invasion_point.iloc[0]['pose.pose.position.x'], lane_invasion_point.iloc[0]['pose.pose.position.y']])
point_2 = np.array([previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist > 1 and point['Time'] - previous_time > 0.5:
lane_invasion_checkpoints_different.append(lane_invasion_point)
previous_time = point['Time']
(previous_lane_invasion_checkpoints_x, previous_lane_invasion_checkpoints_y) = (lane_invasion_point.iloc[0]['pose.pose.position.x'], lane_invasion_point.iloc[0]['pose.pose.position.y'])
experiment_metrics['lane_invasions'] = len(lane_invasion_checkpoints_different)
(experiment_metrics, lane_invasion_checkpoints) = (experiment_metrics, lane_invasion_checkpoints)
experiment_metrics['experiment_total_simulated_time'] = seconds_end - seconds_start
if 'bird_eye_view_images' in experiment_metrics:
experiment_metrics['bird_eye_view_images_per_second'] = experiment_metrics['bird_eye_view_images'] / experiment_metrics['experiment_total_simulated_time']
experiment_metrics['bird_eye_view_unique_images_per_second'] = experiment_metrics['bird_eye_view_unique_images'] / experiment_metrics['experiment_total_simulated_time']
map_waypoints_tuples = []
map_waypoints_tuples_x = []
map_waypoints_tuples_y = []
for waypoint in map_waypoints:
if experiment_metrics['carla_map'] == 'Carla/Maps/Town04' or experiment_metrics['carla_map'] == 'Carla/Maps/Town04_Opt':
map_waypoints_tuples_x.append(-waypoint.transform.location.x)
map_waypoints_tuples_y.append(waypoint.transform.location.y)
map_waypoints_tuples.append((-waypoint.transform.location.x, waypoint.transform.location.y))
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town06' or experiment_metrics['carla_map'] == 'Carla/Maps/Town06_Opt':
map_waypoints_tuples_x.append(waypoint.transform.location.x)
map_waypoints_tuples_y.append(-waypoint.transform.location.y)
map_waypoints_tuples.append((waypoint.transform.location.x, -waypoint.transform.location.y))
else:
map_waypoints_tuples_x.append(waypoint.transform.location.x)
map_waypoints_tuples_y.append(waypoint.transform.location.y)
map_waypoints_tuples.append((waypoint.transform.location.x, waypoint.transform.location.y))
checkpoints_tuples = []
checkpoints_tuples_x = []
checkpoints_tuples_y = []
checkpoints_speeds = []
for (i, point) in enumerate(checkpoints):
current_checkpoint = np.array([point['pose.pose.position.x'], point['pose.pose.position.y'], speedometer_points[i]['data'] * 3.6])
if experiment_metrics['carla_map'] == 'Carla/Maps/Town01' or experiment_metrics['carla_map'] == 'Carla/Maps/Town02' or experiment_metrics['carla_map'] == 'Carla/Maps/Town01_Opt' or (experiment_metrics['carla_map'] == 'Carla/Maps/Town02_Opt'):
checkpoint_x = max(map_waypoints_tuples_x) + min(map_waypoints_tuples_x) - current_checkpoint[0]
checkpoint_y = -point['pose.pose.position.y']
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town03' or experiment_metrics['carla_map'] == 'Carla/Maps/Town07' or experiment_metrics['carla_map'] == 'Carla/Maps/Town03_Opt' or (experiment_metrics['carla_map'] == 'Carla/Maps/Town07_Opt'):
checkpoint_x = current_checkpoint[0]
checkpoint_y = -current_checkpoint[1]
elif experiment_metrics['carla_map'] == 'Carla/Maps/Town04' or experiment_metrics['carla_map'] == 'Carla/Maps/Town04_Opt':
checkpoint_x = -current_checkpoint[0]
checkpoint_y = -current_checkpoint[1]
else:
checkpoint_x = current_checkpoint[0]
checkpoint_y = current_checkpoint[1]
checkpoints_tuples_x.append(checkpoint_x)
checkpoints_tuples_y.append(checkpoint_y)
checkpoints_speeds.append(current_checkpoint[2])
checkpoints_tuples.append((checkpoint_x, checkpoint_y, current_checkpoint[2]))
min_dists = []
best_checkpoint_points_x = []
best_checkpoint_points_y = []
covered_checkpoints = []
for (error_counter, checkpoint) in enumerate(checkpoints_tuples):
min_dist = 100
for (x, perfect_checkpoint) in enumerate(map_waypoints_tuples):
point_1 = np.array([checkpoint[0], checkpoint[1]])
point_2 = np.array([perfect_checkpoint[0], perfect_checkpoint[1]])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist < min_dist:
min_dist = dist
best_checkpoint = x
best_checkpoint_point_x = point_2[0]
best_checkpoint_point_y = point_2[1]
best_checkpoint_points_x.append(best_checkpoint_point_x)
best_checkpoint_points_y.append(best_checkpoint_point_y)
if min_dist < 100:
min_dists.append(min_dist)
if len(covered_checkpoints) == 0 or (len(covered_checkpoints) > 0 and covered_checkpoints[len(covered_checkpoints) - 1][0] != best_checkpoint_point_x and (covered_checkpoints[len(covered_checkpoints) - 1][1] != best_checkpoint_point_y)):
if min_dist < 1:
covered_checkpoints.append((best_checkpoint_point_x, best_checkpoint_point_y))
experiment_metrics['effective_completed_distance'] = len(covered_checkpoints) * 0.5
experiment_metrics['position_deviation_mean'] = sum(min_dists) / len(min_dists)
experiment_metrics['position_deviation_total_err'] = sum(min_dists)
experiment_metrics['position_deviation_mean_per_km'] = experiment_metrics['position_deviation_mean'] / (experiment_metrics['effective_completed_distance'] / 1000)
starting_point_map = (checkpoints_tuples_x[0], checkpoints_tuples_y[0])
experiment_metrics['starting_point_map'] = starting_point_map
if experiment_metrics['collisions'] > 0:
experiment_metrics['collisions_per_km'] = experiment_metrics['collisions'] / (experiment_metrics['effective_completed_distance'] / 1000)
else:
experiment_metrics['collisions_per_km'] = 0
if experiment_metrics['lane_invasions'] > 0:
experiment_metrics['lane_invasions_per_km'] = experiment_metrics['lane_invasions'] / (experiment_metrics['effective_completed_distance'] / 1000)
else:
experiment_metrics['lane_invasions_per_km'] = 0
experiment_metrics['suddenness_distance_control_command_per_km'] = experiment_metrics['suddenness_distance_control_commands'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_throttle_per_km'] = experiment_metrics['suddenness_distance_throttle'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_steer_per_km'] = experiment_metrics['suddenness_distance_steer'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_brake_command_per_km'] = experiment_metrics['suddenness_distance_brake_command'] / (experiment_metrics['effective_completed_distance'] / 1000)
experiment_metrics['suddenness_distance_speed_per_km'] = experiment_metrics['suddenness_distance_speed'] / (experiment_metrics['effective_completed_distance'] / 1000)
create_experiment_maps(experiment_metrics, experiment_metrics_filename, map_waypoints_tuples_x, map_waypoints_tuples_y, best_checkpoint_points_x, best_checkpoint_points_y, checkpoints_tuples_x, checkpoints_tuples_y, checkpoints_speeds, collisions_checkpoints, lane_invasion_checkpoints)
experiment_metrics = experiment_metrics
points_to_start_count = 50
completed_laps = 0
for (x, checkpoint) in enumerate(checkpoints):
if points_to_start_count > 0:
points_to_start_count -= 1
else:
point_1 = np.array([checkpoint['pose.pose.position.x'], checkpoint['pose.pose.position.y']])
point_2 = np.array([starting_point[0], starting_point[1]])
dist = (point_2 - point_1) ** 2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
if dist < 0.5:
completed_laps += 1
points_to_start_count = 50
experiment_metrics['completed_laps'] = completed_laps
shutil.rmtree(experiment_metrics_bag_filename.split('.bag')[0])
return experiment_metrics
else:
return {}
|
BehaviorMetrics
|
positive
|
def vstack_ims(ims, bg_color=(0, 0, 0)):
if len(ims) == 0:
return make(0, 0)
max_w = max([im.shape[1] for im in ims])
result = []
for im in ims:
<DeepExtract>
frame = np.uint8(np.tile([[bg_color]], (im.shape[0], max_w, 1)))
</DeepExtract>
<DeepExtract>
if im.ndim == 3 and im.shape[2] == 3:
frame[:im.shape[0], :im.shape[1]] = im.copy() if copy else im
elif im.ndim == 3 and im.shape[2] == 4:
frame[:im.shape[0], :im.shape[1]] = (im.copy() if copy else im)[..., :3]
elif im.ndim == 3 and im.shape[2] == 1:
frame[:im.shape[0], :im.shape[1]] = np.tile(im, (1, 1, 3))
elif im.ndim == 2:
frame[:im.shape[0], :im.shape[1]] = np.tile(im[:, :, np.newaxis], (1, 1, 3))
else:
raise RuntimeError('Cannot convert to rgb. Shape: ' + str(im.shape))
</DeepExtract>
result.append(frame)
return np.vstack(result)
|
def vstack_ims(ims, bg_color=(0, 0, 0)):
if len(ims) == 0:
return make(0, 0)
max_w = max([im.shape[1] for im in ims])
result = []
for im in ims:
frame = np.uint8(np.tile([[bg_color]], (im.shape[0], max_w, 1)))
if im.ndim == 3 and im.shape[2] == 3:
frame[:im.shape[0], :im.shape[1]] = im.copy() if copy else im
elif im.ndim == 3 and im.shape[2] == 4:
frame[:im.shape[0], :im.shape[1]] = (im.copy() if copy else im)[..., :3]
elif im.ndim == 3 and im.shape[2] == 1:
frame[:im.shape[0], :im.shape[1]] = np.tile(im, (1, 1, 3))
elif im.ndim == 2:
frame[:im.shape[0], :im.shape[1]] = np.tile(im[:, :, np.newaxis], (1, 1, 3))
else:
raise RuntimeError('Cannot convert to rgb. Shape: ' + str(im.shape))
result.append(frame)
return np.vstack(result)
|
avobjects
|
positive
|
def test_201_centos7_httpd_dockerfile(self):
""" WHEN using a dockerfile for systemd-enabled CentOS 7 and python2,
THEN we can create an image with an Apache HTTP service
being installed and enabled.
Without a special startup.sh script or container-cmd
one can just start the image and in the container
expecting that the service is started. Therefore,
WHEN we start the image as a docker container
THEN we can download the root html showing 'OK'
because the test script has placed an index.html
in the webserver containing that text. """
if not os.path.exists(DOCKER_SOCKET):
self.skipTest('docker-based test')
docker = _docker
curl = _curl
<DeepExtract>
name = self.caller_testname()
if suffix:
testname = name + '_' + suffix
testname = name
</DeepExtract>
<DeepExtract>
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
os.makedirs(newdir)
testdir = newdir
</DeepExtract>
name = 'centos7-httpd'
dockerfile = 'centos7-httpd.dockerfile'
<DeepExtract>
image = ''
for line in open(dockerfile):
m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line)
if m:
image = m.group(1)
break
m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line)
if m:
image = m.group(1).strip()
break
logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image)
if image:
addhosts = self.start_mirror(image, extras)
addhosts = ''
</DeepExtract>
<DeepExtract>
savename = os.path.splitext(os.path.basename(dockerfile))[0]
</DeepExtract>
saveto = SAVETO
images = IMAGES
cmd = '{docker} build . -f {dockerfile} {addhosts} --tag {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rm --force {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} run -d --name {testname} {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
<DeepExtract>
docker = _docker
cmd = '{docker} inspect {name}'
values = output(cmd.format(**locals()))
values = json.loads(values)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', testname, values)
container = values[0]['NetworkSettings']['IPAddress']
</DeepExtract>
cmd = 'sleep 5; {curl} -o {testdir}/{testname}.txt http://{container}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = 'grep OK {testdir}/{testname}.txt'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} stop {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rm --force {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rmi {saveto}/{savename}:latest'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rmi {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
<DeepExtract>
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
return newdir
</DeepExtract>
|
def test_201_centos7_httpd_dockerfile(self):
""" WHEN using a dockerfile for systemd-enabled CentOS 7 and python2,
THEN we can create an image with an Apache HTTP service
being installed and enabled.
Without a special startup.sh script or container-cmd
one can just start the image and in the container
expecting that the service is started. Therefore,
WHEN we start the image as a docker container
THEN we can download the root html showing 'OK'
because the test script has placed an index.html
in the webserver containing that text. """
if not os.path.exists(DOCKER_SOCKET):
self.skipTest('docker-based test')
docker = _docker
curl = _curl
name = self.caller_testname()
if suffix:
testname = name + '_' + suffix
testname = name
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
os.makedirs(newdir)
testdir = newdir
name = 'centos7-httpd'
dockerfile = 'centos7-httpd.dockerfile'
image = ''
for line in open(dockerfile):
m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line)
if m:
image = m.group(1)
break
m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line)
if m:
image = m.group(1).strip()
break
logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image)
if image:
addhosts = self.start_mirror(image, extras)
addhosts = ''
savename = os.path.splitext(os.path.basename(dockerfile))[0]
saveto = SAVETO
images = IMAGES
cmd = '{docker} build . -f {dockerfile} {addhosts} --tag {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rm --force {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
cmd = '{docker} run -d --name {testname} {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
docker = _docker
cmd = '{docker} inspect {name}'
values = output(cmd.format(**locals()))
values = json.loads(values)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', testname, values)
container = values[0]['NetworkSettings']['IPAddress']
cmd = 'sleep 5; {curl} -o {testdir}/{testname}.txt http://{container}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = 'grep OK {testdir}/{testname}.txt'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} stop {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rm --force {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rmi {saveto}/{savename}:latest'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rmi {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
return newdir
|
docker-systemctl-images
|
positive
|
@force_fp32(apply_to=('feats',), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
<DeepExtract>
scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-06))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
target_lvls = target_lvls
</DeepExtract>
roi_feats = feats[0].new_zeros(rois.size(0), self.out_channels, *out_size)
if roi_scale_factor is not None:
<DeepExtract>
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * roi_scale_factor
new_h = h * roi_scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
rois = new_rois
</DeepExtract>
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
|
@force_fp32(apply_to=('feats',), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-06))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
target_lvls = target_lvls
roi_feats = feats[0].new_zeros(rois.size(0), self.out_channels, *out_size)
if roi_scale_factor is not None:
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * roi_scale_factor
new_h = h * roi_scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
rois = new_rois
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
|
ATSS-EfficientDet-PyTorch
|
positive
|
def change_parameters(self, config_path, parameter_dict: dict, overwrite: bool=False):
"""Changes multiple modules at once,
    :param parameter_dict: nested dictionary in style:{module_name: dict(parameter_name = value)}"""
<DeepExtract>
self._read_config(config_path)
self._dlstream_dict = self._config._sections
</DeepExtract>
for key in parameter_dict.keys():
for (inner_key, value) in parameter_dict[key].items():
<DeepExtract>
inner_key = inner_key.upper()
if str(key).upper() in self._dlstream_dict.keys():
if inner_key in self._dlstream_dict[str(key).upper()].keys():
old_value = self._dlstream_dict[str(key).upper()][inner_key]
if not isinstance(value, str):
value = str(value)
self._dlstream_dict[str(key).upper()][inner_key] = value
print(f'Changed {inner_key} in {str(key).upper()} from {old_value} to {value}.')
else:
raise ValueError(f'Parameter {inner_key} does not exist in given config.')
else:
raise ValueError(f'Module {str(key).upper()} does not exist in given config.')
</DeepExtract>
if overwrite:
<DeepExtract>
if config_path is None:
file = self._set_path()
else:
self._filename = os.path.basename(config_path)
file = open(config_path, 'w')
self._config = self._init_configparser()
for key in self._dlstream_dict.keys():
self._config.add_section(key)
for (parameter, value) in self._dlstream_dict[key].items():
self._config.set(key, parameter, str(value))
self._config.write(file)
file.close()
print(f'Created {self._filename}.')
</DeepExtract>
|
def change_parameters(self, config_path, parameter_dict: dict, overwrite: bool=False):
"""Changes multiple modules at once,
    :param parameter_dict: nested dictionary in style:{module_name: dict(parameter_name = value)}"""
self._read_config(config_path)
self._dlstream_dict = self._config._sections
for key in parameter_dict.keys():
for (inner_key, value) in parameter_dict[key].items():
inner_key = inner_key.upper()
if str(key).upper() in self._dlstream_dict.keys():
if inner_key in self._dlstream_dict[str(key).upper()].keys():
old_value = self._dlstream_dict[str(key).upper()][inner_key]
if not isinstance(value, str):
value = str(value)
self._dlstream_dict[str(key).upper()][inner_key] = value
print(f'Changed {inner_key} in {str(key).upper()} from {old_value} to {value}.')
else:
raise ValueError(f'Parameter {inner_key} does not exist in given config.')
else:
raise ValueError(f'Module {str(key).upper()} does not exist in given config.')
if overwrite:
if config_path is None:
file = self._set_path()
else:
self._filename = os.path.basename(config_path)
file = open(config_path, 'w')
self._config = self._init_configparser()
for key in self._dlstream_dict.keys():
self._config.add_section(key)
for (parameter, value) in self._dlstream_dict[key].items():
self._config.set(key, parameter, str(value))
self._config.write(file)
file.close()
print(f'Created {self._filename}.')
|
DeepLabStream
|
positive
|
def delegatecall(evm: Evm) -> None:
"""
Message-call into an account.
Parameters
----------
evm :
The current EVM frame.
"""
gas = Uint(pop(evm.stack))
code_address = to_address(pop(evm.stack))
memory_input_start_position = pop(evm.stack)
memory_input_size = pop(evm.stack)
memory_output_start_position = pop(evm.stack)
memory_output_size = pop(evm.stack)
extend_memory = calculate_gas_extend_memory(evm.memory, [(memory_input_start_position, memory_input_size), (memory_output_start_position, memory_output_size)])
charge_gas(evm, GAS_CALL + gas + extend_memory.cost)
evm.memory += b'\x00' * extend_memory.expand_by
<DeepExtract>
from ...vm.interpreter import STACK_DEPTH_LIMIT, process_message
if evm.message.depth + 1 > STACK_DEPTH_LIMIT:
evm.gas_left += gas
push(evm.stack, U256(0))
return
call_data = memory_read_bytes(evm.memory, memory_input_start_position, memory_input_size)
code = get_account(evm.env.state, code_address).code
child_message = Message(caller=evm.message.caller, target=evm.message.current_target, gas=U256(gas), value=evm.message.value, data=call_data, code=code, current_target=evm.message.current_target, depth=evm.message.depth + 1, code_address=code_address, should_transfer_value=False)
child_evm = process_message(child_message, evm.env)
evm.children.append(child_evm)
if child_evm.has_erred:
push(evm.stack, U256(0))
else:
evm.logs += child_evm.logs
push(evm.stack, U256(1))
actual_output_size = min(memory_output_size, U256(len(child_evm.output)))
memory_write(evm.memory, memory_output_start_position, child_evm.output[:actual_output_size])
evm.gas_left += child_evm.gas_left
child_evm.gas_left = U256(0)
</DeepExtract>
evm.pc += 1
|
def delegatecall(evm: Evm) -> None:
"""
Message-call into an account.
Parameters
----------
evm :
The current EVM frame.
"""
gas = Uint(pop(evm.stack))
code_address = to_address(pop(evm.stack))
memory_input_start_position = pop(evm.stack)
memory_input_size = pop(evm.stack)
memory_output_start_position = pop(evm.stack)
memory_output_size = pop(evm.stack)
extend_memory = calculate_gas_extend_memory(evm.memory, [(memory_input_start_position, memory_input_size), (memory_output_start_position, memory_output_size)])
charge_gas(evm, GAS_CALL + gas + extend_memory.cost)
evm.memory += b'\x00' * extend_memory.expand_by
from ...vm.interpreter import STACK_DEPTH_LIMIT, process_message
if evm.message.depth + 1 > STACK_DEPTH_LIMIT:
evm.gas_left += gas
push(evm.stack, U256(0))
return
call_data = memory_read_bytes(evm.memory, memory_input_start_position, memory_input_size)
code = get_account(evm.env.state, code_address).code
child_message = Message(caller=evm.message.caller, target=evm.message.current_target, gas=U256(gas), value=evm.message.value, data=call_data, code=code, current_target=evm.message.current_target, depth=evm.message.depth + 1, code_address=code_address, should_transfer_value=False)
child_evm = process_message(child_message, evm.env)
evm.children.append(child_evm)
if child_evm.has_erred:
push(evm.stack, U256(0))
else:
evm.logs += child_evm.logs
push(evm.stack, U256(1))
actual_output_size = min(memory_output_size, U256(len(child_evm.output)))
memory_write(evm.memory, memory_output_start_position, child_evm.output[:actual_output_size])
evm.gas_left += child_evm.gas_left
child_evm.gas_left = U256(0)
evm.pc += 1
|
eth1.0-specs
|
positive
|
def run_forever(self, base_interval: float=1.0) -> None:
"""Loop and call Steam.run_callbacks in specified interval
:param base_interval: float
:return: None
"""
while True:
<DeepExtract>
if not self.loaded():
raise SteamNotLoadedException('STEAMWORKS not yet loaded')
self.RunCallbacks()
return True
</DeepExtract>
time.sleep(base_interval)
|
def run_forever(self, base_interval: float=1.0) -> None:
"""Loop and call Steam.run_callbacks in specified interval
:param base_interval: float
:return: None
"""
while True:
if not self.loaded():
raise SteamNotLoadedException('STEAMWORKS not yet loaded')
self.RunCallbacks()
return True
time.sleep(base_interval)
|
armcom
|
positive
|
def __iter__(self):
<DeepExtract>
self._num_yielded = 0
self.indices = np.arange(len(self.dataset))
if self.shuffle is True:
np.random.shuffle(self.indices)
</DeepExtract>
return self
|
def __iter__(self):
self._num_yielded = 0
self.indices = np.arange(len(self.dataset))
if self.shuffle is True:
np.random.shuffle(self.indices)
return self
|
doctr
|
positive
|
def test_conv2d():
<DeepExtract>
(t, y) = (torch.randn((), device=device), torch.randn(size=(batch_size, c, h, w), device=device))
(t, y) = (t, y)
</DeepExtract>
layer = diffeq_layers.ConcatConv2d(in_channels=y.shape[1], out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)
params = layer.make_initial_params()
out = layer(t, y, params)
layer_ref = nn.Conv2d(in_channels=y.shape[1] + 1, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)
out_ref = layer_ref(utils.channel_cat(t, y))
assert out.size() == out_ref.size()
|
def test_conv2d():
(t, y) = (torch.randn((), device=device), torch.randn(size=(batch_size, c, h, w), device=device))
(t, y) = (t, y)
layer = diffeq_layers.ConcatConv2d(in_channels=y.shape[1], out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)
params = layer.make_initial_params()
out = layer(t, y, params)
layer_ref = nn.Conv2d(in_channels=y.shape[1] + 1, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)
out_ref = layer_ref(utils.channel_cat(t, y))
assert out.size() == out_ref.size()
|
bayesian-sde
|
positive
|
def test_list_identity_udp(self):
with client.connector(host=self.enip_server_udp.addr, port=self.enip_server_udp.port, timeout=4.0, udp=True, broadcast=True) as connection:
connection.list_identity()
<DeepExtract>
(response, _) = client.await_response(connection, timeout=4.0)
response = response['enip']['CIP']['list_identity']['CPF']
</DeepExtract>
expected = self.enip_server_tcp.config.product_name
self.assertEqual(expected, response['item'][0]['identity_object']['product_name'])
|
def test_list_identity_udp(self):
with client.connector(host=self.enip_server_udp.addr, port=self.enip_server_udp.port, timeout=4.0, udp=True, broadcast=True) as connection:
connection.list_identity()
(response, _) = client.await_response(connection, timeout=4.0)
response = response['enip']['CIP']['list_identity']['CPF']
expected = self.enip_server_tcp.config.product_name
self.assertEqual(expected, response['item'][0]['identity_object']['product_name'])
|
conpot
|
positive
|
def backward_G(self):
fake_logit = self.nets['netD'](self.fake)
<DeepExtract>
real_feature_map = self.pretrained(self.real)
fake_feature_map = self.pretrained(self.fake)
anime_feature_map = self.pretrained(self.anime_gray)
c_loss = self.criterionL1(real_feature_map, fake_feature_map)
s_loss = self.style_loss(anime_feature_map, fake_feature_map)
(c_loss, s_loss) = (c_loss, s_loss)
</DeepExtract>
c_loss = self.cfg.con_weight * c_loss
s_loss = self.cfg.sty_weight * s_loss
tv_loss = self.cfg.tv_weight * self.variation_loss(self.fake)
col_loss = self.cfg.color_weight * self.color_loss(self.real, self.fake)
g_loss = self.cfg.g_adv_weight * self.criterionGAN(fake_logit, True)
self.loss_G = c_loss + s_loss + col_loss + g_loss + tv_loss
self.loss_G.backward()
self.losses['g_loss'] = self.loss_G
self.losses['c_loss'] = c_loss
self.losses['s_loss'] = s_loss
self.losses['col_loss'] = col_loss
self.losses['tv_loss'] = tv_loss
|
def backward_G(self):
fake_logit = self.nets['netD'](self.fake)
real_feature_map = self.pretrained(self.real)
fake_feature_map = self.pretrained(self.fake)
anime_feature_map = self.pretrained(self.anime_gray)
c_loss = self.criterionL1(real_feature_map, fake_feature_map)
s_loss = self.style_loss(anime_feature_map, fake_feature_map)
(c_loss, s_loss) = (c_loss, s_loss)
c_loss = self.cfg.con_weight * c_loss
s_loss = self.cfg.sty_weight * s_loss
tv_loss = self.cfg.tv_weight * self.variation_loss(self.fake)
col_loss = self.cfg.color_weight * self.color_loss(self.real, self.fake)
g_loss = self.cfg.g_adv_weight * self.criterionGAN(fake_logit, True)
self.loss_G = c_loss + s_loss + col_loss + g_loss + tv_loss
self.loss_G.backward()
self.losses['g_loss'] = self.loss_G
self.losses['c_loss'] = c_loss
self.losses['s_loss'] = s_loss
self.losses['col_loss'] = col_loss
self.losses['tv_loss'] = tv_loss
|
-AI-emmmm
|
positive
|
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
(arg_shapes, _, aux_shapes) = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, 'shape inference failed'
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
(arg_types, _, aux_types) = self.symbol.infer_type(**input_types)
assert arg_types is not None, 'type inference failed'
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping"""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shape) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names:
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else:
<DeepExtract>
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]):
assert arg_arr.dtype == arg_types[j]
arg_arr = arg_arr.reshape(arg_shapes[j])
else:
self.logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays[name] = arg_arr
arg_arr = arg_arr
</DeepExtract>
if self.grad_req[name] != 'null':
<DeepExtract>
if 'grad of ' + name in shared_data_arrays:
arg_arr = shared_data_arrays['grad of ' + name]
if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]):
assert arg_arr.dtype == arg_types[j]
arg_arr = arg_arr.reshape(arg_shapes[j])
else:
self.logger.warning('bucketing: data "%s" has a shape %s' % ('grad of ' + name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays['grad of ' + name] = arg_arr
else:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays['grad of ' + name] = arg_arr
grad_arrays['grad of ' + name] = arg_arr
</DeepExtract>
arg_arrays.append(arg_arr)
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for (s, t) in zip(aux_shapes, aux_types)]
else:
for (j, arr) in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=self.grad_req, shared_exec=shared_exec)
return executor
|
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
(arg_shapes, _, aux_shapes) = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, 'shape inference failed'
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
(arg_types, _, aux_types) = self.symbol.infer_type(**input_types)
assert arg_types is not None, 'type inference failed'
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping"""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shape) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names:
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else:
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]):
assert arg_arr.dtype == arg_types[j]
arg_arr = arg_arr.reshape(arg_shapes[j])
else:
self.logger.warning('bucketing: data "%s" has a shape %s' % (name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays[name] = arg_arr
arg_arr = arg_arr
if self.grad_req[name] != 'null':
if 'grad of ' + name in shared_data_arrays:
arg_arr = shared_data_arrays['grad of ' + name]
if np.prod(arg_arr.shape) >= np.prod(arg_shapes[j]):
assert arg_arr.dtype == arg_types[j]
arg_arr = arg_arr.reshape(arg_shapes[j])
else:
self.logger.warning('bucketing: data "%s" has a shape %s' % ('grad of ' + name, arg_shapes[j]) + ', which is larger than already allocated ' + 'shape %s' % (arg_arr.shape,) + '. Need to re-allocate. Consider putting ' + 'default_bucket_key to' + ' be the bucket taking the largest input for better ' + 'memory sharing.')
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays['grad of ' + name] = arg_arr
else:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
shared_data_arrays['grad of ' + name] = arg_arr
grad_arrays['grad of ' + name] = arg_arr
arg_arrays.append(arg_arr)
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for (s, t) in zip(aux_shapes, aux_types)]
else:
for (j, arr) in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=self.grad_req, shared_exec=shared_exec)
return executor
|
Accel
|
positive
|
def degree_normalize_adj_tensor(adj, sparse=True):
"""degree_normalize_adj_tensor.
"""
device = adj.device
if sparse:
<DeepExtract>
if is_sparse_tensor(adj):
values = adj._values()
indices = adj._indices()
adj = sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=adj.shape)
else:
indices = adj.nonzero().t()
values = adj[indices[0], indices[1]]
adj = sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=adj.shape)
</DeepExtract>
<DeepExtract>
adj = adj.tolil()
if adj[0, 0] == 0:
adj = adj + sp.eye(adj.shape[0])
rowsum = np.array(adj.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
adj = r_mat_inv.dot(adj)
adj = adj
</DeepExtract>
return sparse_mx_to_torch_sparse_tensor(mx).to(device)
else:
mx = adj + torch.eye(adj.shape[0]).to(device)
rowsum = mx.sum(1)
r_inv = rowsum.pow(-1).flatten()
r_inv[torch.isinf(r_inv)] = 0.0
r_mat_inv = torch.diag(r_inv)
mx = r_mat_inv @ mx
return mx
|
def degree_normalize_adj_tensor(adj, sparse=True):
"""degree_normalize_adj_tensor.
"""
device = adj.device
if sparse:
if is_sparse_tensor(adj):
values = adj._values()
indices = adj._indices()
adj = sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=adj.shape)
else:
indices = adj.nonzero().t()
values = adj[indices[0], indices[1]]
adj = sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=adj.shape)
adj = adj.tolil()
if adj[0, 0] == 0:
adj = adj + sp.eye(adj.shape[0])
rowsum = np.array(adj.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
adj = r_mat_inv.dot(adj)
adj = adj
return sparse_mx_to_torch_sparse_tensor(mx).to(device)
else:
mx = adj + torch.eye(adj.shape[0]).to(device)
rowsum = mx.sum(1)
r_inv = rowsum.pow(-1).flatten()
r_inv[torch.isinf(r_inv)] = 0.0
r_mat_inv = torch.diag(r_inv)
mx = r_mat_inv @ mx
return mx
|
DeepRobust
|
positive
|
def build_dir(self, vfs: VfsDatabase, src_path: str, dst_path: str, subset=None, symlink_changed_file=False, do_not_build_archive=False):
print(f'build_node: {dst_path} | {src_path}')
src_files = {}
if isinstance(src_path, bytes):
src_path = src_path.decode('utf-8')
if isinstance(dst_path, bytes):
dst_path = dst_path.decode('utf-8')
wl = [src_path]
while len(wl) > 0:
cpath = wl.pop(0)
print('Process: {}'.format(cpath))
if os.path.isdir(cpath):
cdir = os.listdir(cpath)
for entry in cdir:
wl.append(os.path.join(cpath, entry))
elif os.path.isfile(cpath):
(_, file) = os.path.split(cpath)
(_, ext) = os.path.splitext(file)
if ext == '.deca_sha1sum':
pass
elif file.endswith('.DECA.FILE_LIST.txt'):
v_path = cpath[len(src_path):-len('.DECA.FILE_LIST.txt')].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: DECA.FILE_LIST.txt: {} = {}'.format(v_path, cpath))
elif file.endswith('.ddsc.dds'):
v_path = cpath[len(src_path):-len('.dds')].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: ddsc.dds: {} = {}'.format(v_path, cpath))
elif file.find('DECA') >= 0:
pass
else:
v_path = cpath[len(src_path):].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: default: {} = {}'.format(v_path, cpath))
pack_list = list(src_files.keys())
depends = {}
completed = set()
while len(pack_list) > 0:
v_path = pack_list.pop(0)
print(f'PACKING: {v_path}')
if v_path not in completed:
print(f'COMPLETING: {v_path}')
completed.add(v_path)
depends[v_path] = depends.get(v_path, set())
vnodes = vfs.nodes_where_match(v_path=v_path)
if len(vnodes) == 0:
print('TODO: WARNING: FILE {} NOT HANDLED'.format(v_path))
else:
vnode: VfsNode
for vnode in vnodes:
pid = vnode.pid
if pid is not None:
pnode: VfsNode = vfs.node_where_uid(pid)
if pnode.file_type == FTYPE_GDCBODY:
pid = pnode.pid
pnode = vfs.node_where_uid(pid)
if pnode.file_type != FTYPE_ARC and pnode.file_type != FTYPE_TAB:
if pnode.file_type is None:
raise EDecaBuildError('MISSING VPATH FOR uid:{} hash:{:08X}, when packing {}'.format(pnode.uid, pnode.v_hash, vnode.v_path))
else:
depends[pnode.v_path] = depends.get(pnode.v_path, set()).union({vnode.v_path})
pack_list.append(pnode.v_path)
if subset is not None:
print('CALCULATING SUBSET')
subset_vpaths = set()
for uid in subset:
vnode: VfsNode = vfs.node_where_uid(uid)
subset_vpaths.add(vnode.v_path)
depends_keep = set()
for vpath in subset_vpaths:
deps = depends.get(vpath, None)
if deps is not None:
depends_keep.add(vpath)
depends_keep = depends_keep.union(deps)
depends_remove = [k for k in depends.keys() if k not in depends_keep]
for k in depends_remove:
depends.pop(k, None)
else:
print('SKIPPING SUBSET')
vpaths_completed = {}
while len(depends) > 0:
any_change = False
depends_keys = list(depends.keys())
for v_path in depends_keys:
print(f'check depends: {v_path} | {depends[v_path]}')
if len(depends[v_path]) == 0:
any_change = True
depends.pop(v_path)
v: set
for (k, v) in depends.items():
v.discard(v_path)
fpath = src_files.get(v_path, None)
vnodes = vfs.nodes_where_match(v_path=v_path)
if len(vnodes) == 0:
raise EDecaBuildError('MISSING VPATH when building v_path={} using fpath={}'.format(v_path, fpath))
else:
<DeepExtract>
print(f'build_node: {dst_path} | {fpath} | {vnodes[0].file_type} | {vnodes[0].v_path}')
v_path = vnodes[0].v_path
if fpath is None:
src_file = None
else:
(_, src_file) = os.path.split(fpath)
if vnodes[0].file_type == FTYPE_SARC:
if not do_not_build_archive:
self.build_node_sarc(dst_path, fpath, vnodes[0], vfs, vpaths_completed, symlink_changed_file)
elif fpath is None:
pass
elif src_file.find('DECA') >= 0:
pass
elif src_file.endswith('.ddsc.dds'):
vnodes[0] = vfs.nodes_where_match(v_path=v_path)[0]
compiled_files = image_import(vfs, vnodes[0], fpath, dst_path)
for cfile in compiled_files:
v_path = cfile[0]
dst = cfile[1]
vpaths_completed[v_path] = dst
else:
dst = os.path.join(dst_path, v_path.decode('utf-8'))
dst_dir = os.path.dirname(dst)
os.makedirs(dst_dir, exist_ok=True)
shutil.copy2(fpath, dst)
vpaths_completed[v_path] = dst
</DeepExtract>
if not any_change and len(depends) > 0:
print('BUILD FAILED: Infinite loop:')
print('depends')
pprint(depends)
print('vpaths_completed')
pprint(vpaths_completed)
raise EDecaBuildError('BUILD FAILED\n' + pformat(depends_keys))
print('BUILD SUCCESS:')
for (k, v) in vpaths_completed.items():
print(v)
|
def build_dir(self, vfs: VfsDatabase, src_path: str, dst_path: str, subset=None, symlink_changed_file=False, do_not_build_archive=False):
print(f'build_node: {dst_path} | {src_path}')
src_files = {}
if isinstance(src_path, bytes):
src_path = src_path.decode('utf-8')
if isinstance(dst_path, bytes):
dst_path = dst_path.decode('utf-8')
wl = [src_path]
while len(wl) > 0:
cpath = wl.pop(0)
print('Process: {}'.format(cpath))
if os.path.isdir(cpath):
cdir = os.listdir(cpath)
for entry in cdir:
wl.append(os.path.join(cpath, entry))
elif os.path.isfile(cpath):
(_, file) = os.path.split(cpath)
(_, ext) = os.path.splitext(file)
if ext == '.deca_sha1sum':
pass
elif file.endswith('.DECA.FILE_LIST.txt'):
v_path = cpath[len(src_path):-len('.DECA.FILE_LIST.txt')].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: DECA.FILE_LIST.txt: {} = {}'.format(v_path, cpath))
elif file.endswith('.ddsc.dds'):
v_path = cpath[len(src_path):-len('.dds')].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: ddsc.dds: {} = {}'.format(v_path, cpath))
elif file.find('DECA') >= 0:
pass
else:
v_path = cpath[len(src_path):].encode('ascii')
v_path = v_path.replace(b'\\', b'/')
src_files[v_path] = cpath
print('DEPEND: default: {} = {}'.format(v_path, cpath))
pack_list = list(src_files.keys())
depends = {}
completed = set()
while len(pack_list) > 0:
v_path = pack_list.pop(0)
print(f'PACKING: {v_path}')
if v_path not in completed:
print(f'COMPLETING: {v_path}')
completed.add(v_path)
depends[v_path] = depends.get(v_path, set())
vnodes = vfs.nodes_where_match(v_path=v_path)
if len(vnodes) == 0:
print('TODO: WARNING: FILE {} NOT HANDLED'.format(v_path))
else:
vnode: VfsNode
for vnode in vnodes:
pid = vnode.pid
if pid is not None:
pnode: VfsNode = vfs.node_where_uid(pid)
if pnode.file_type == FTYPE_GDCBODY:
pid = pnode.pid
pnode = vfs.node_where_uid(pid)
if pnode.file_type != FTYPE_ARC and pnode.file_type != FTYPE_TAB:
if pnode.file_type is None:
raise EDecaBuildError('MISSING VPATH FOR uid:{} hash:{:08X}, when packing {}'.format(pnode.uid, pnode.v_hash, vnode.v_path))
else:
depends[pnode.v_path] = depends.get(pnode.v_path, set()).union({vnode.v_path})
pack_list.append(pnode.v_path)
if subset is not None:
print('CALCULATING SUBSET')
subset_vpaths = set()
for uid in subset:
vnode: VfsNode = vfs.node_where_uid(uid)
subset_vpaths.add(vnode.v_path)
depends_keep = set()
for vpath in subset_vpaths:
deps = depends.get(vpath, None)
if deps is not None:
depends_keep.add(vpath)
depends_keep = depends_keep.union(deps)
depends_remove = [k for k in depends.keys() if k not in depends_keep]
for k in depends_remove:
depends.pop(k, None)
else:
print('SKIPPING SUBSET')
vpaths_completed = {}
while len(depends) > 0:
any_change = False
depends_keys = list(depends.keys())
for v_path in depends_keys:
print(f'check depends: {v_path} | {depends[v_path]}')
if len(depends[v_path]) == 0:
any_change = True
depends.pop(v_path)
v: set
for (k, v) in depends.items():
v.discard(v_path)
fpath = src_files.get(v_path, None)
vnodes = vfs.nodes_where_match(v_path=v_path)
if len(vnodes) == 0:
raise EDecaBuildError('MISSING VPATH when building v_path={} using fpath={}'.format(v_path, fpath))
else:
print(f'build_node: {dst_path} | {fpath} | {vnodes[0].file_type} | {vnodes[0].v_path}')
v_path = vnodes[0].v_path
if fpath is None:
src_file = None
else:
(_, src_file) = os.path.split(fpath)
if vnodes[0].file_type == FTYPE_SARC:
if not do_not_build_archive:
self.build_node_sarc(dst_path, fpath, vnodes[0], vfs, vpaths_completed, symlink_changed_file)
elif fpath is None:
pass
elif src_file.find('DECA') >= 0:
pass
elif src_file.endswith('.ddsc.dds'):
vnodes[0] = vfs.nodes_where_match(v_path=v_path)[0]
compiled_files = image_import(vfs, vnodes[0], fpath, dst_path)
for cfile in compiled_files:
v_path = cfile[0]
dst = cfile[1]
vpaths_completed[v_path] = dst
else:
dst = os.path.join(dst_path, v_path.decode('utf-8'))
dst_dir = os.path.dirname(dst)
os.makedirs(dst_dir, exist_ok=True)
shutil.copy2(fpath, dst)
vpaths_completed[v_path] = dst
if not any_change and len(depends) > 0:
print('BUILD FAILED: Infinite loop:')
print('depends')
pprint(depends)
print('vpaths_completed')
pprint(vpaths_completed)
raise EDecaBuildError('BUILD FAILED\n' + pformat(depends_keys))
print('BUILD SUCCESS:')
for (k, v) in vpaths_completed.items():
print(v)
|
deca
|
positive
|
@login_required
def edit(request, member_id, spent_time_factor_id):
member = Member.objects.get(id=member_id)
try:
assert_user_can_edit_member(request.user, member)
except AssertionError:
return HttpResponseForbidden()
spent_time_factor = get_object_or_404(SpentTimeFactor, id=spent_time_factor_id, member=member)
if request.method == 'POST':
form = SpentTimeFactorForm(request.POST, instance=spent_time_factor)
if form.is_valid():
form.save()
<DeepExtract>
for daily_spent_time in member.daily_spent_times.all():
daily_spent_time.update_adjusted_spent_time()
</DeepExtract>
return HttpResponseRedirect(reverse('members:view_spent_time_factors', args=(member.id,)))
else:
form = SpentTimeFactorForm(instance=spent_time_factor)
replacements = {'member': member, 'form': form}
return render(request, 'members/spent_time_factors/edit.html', replacements)
|
@login_required
def edit(request, member_id, spent_time_factor_id):
member = Member.objects.get(id=member_id)
try:
assert_user_can_edit_member(request.user, member)
except AssertionError:
return HttpResponseForbidden()
spent_time_factor = get_object_or_404(SpentTimeFactor, id=spent_time_factor_id, member=member)
if request.method == 'POST':
form = SpentTimeFactorForm(request.POST, instance=spent_time_factor)
if form.is_valid():
form.save()
for daily_spent_time in member.daily_spent_times.all():
daily_spent_time.update_adjusted_spent_time()
return HttpResponseRedirect(reverse('members:view_spent_time_factors', args=(member.id,)))
else:
form = SpentTimeFactorForm(instance=spent_time_factor)
replacements = {'member': member, 'form': form}
return render(request, 'members/spent_time_factors/edit.html', replacements)
|
djanban
|
positive
|
def BilinearInterpolation(self, blob_in, blob_out, dim_in, dim_out, up_scale):
"""Bilinear interpolation in space of scale.
Takes input of NxKxHxW and outputs NxKx(sH)x(sW), where s:= up_scale
Adapted from the CVPR'15 FCN code.
See: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
"""
assert dim_in == dim_out
assert up_scale % 2 == 0, 'Scale should be even'
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
kernel_size = up_scale * 2
<DeepExtract>
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
bil_filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
</DeepExtract>
kernel = np.zeros((dim_in, dim_out, kernel_size, kernel_size), dtype=np.float32)
kernel[range(dim_out), range(dim_in), :, :] = bil_filt
blob = self.ConvTranspose(blob_in, blob_out, dim_in, dim_out, kernel_size, stride=int(up_scale), pad=int(up_scale / 2), weight_init=('GivenTensorFill', {'values': kernel}), bias_init=('ConstantFill', {'value': 0.0}))
self.do_not_update_params.append(self.weights[-1])
self.do_not_update_params.append(self.biases[-1])
return blob
|
def BilinearInterpolation(self, blob_in, blob_out, dim_in, dim_out, up_scale):
"""Bilinear interpolation in space of scale.
Takes input of NxKxHxW and outputs NxKx(sH)x(sW), where s:= up_scale
Adapted from the CVPR'15 FCN code.
See: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
"""
assert dim_in == dim_out
assert up_scale % 2 == 0, 'Scale should be even'
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
kernel_size = up_scale * 2
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
bil_filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
kernel = np.zeros((dim_in, dim_out, kernel_size, kernel_size), dtype=np.float32)
kernel[range(dim_out), range(dim_in), :, :] = bil_filt
blob = self.ConvTranspose(blob_in, blob_out, dim_in, dim_out, kernel_size, stride=int(up_scale), pad=int(up_scale / 2), weight_init=('GivenTensorFill', {'values': kernel}), bias_init=('ConstantFill', {'value': 0.0}))
self.do_not_update_params.append(self.weights[-1])
self.do_not_update_params.append(self.biases[-1])
return blob
|
Detectron
|
positive
|
@parse_debug
def parse_expression_2_rest(self):
parts = list()
token = self.tokens.look()
while token.value in Operator.INFIX or token.value == 'instanceof':
if self.try_accept('instanceof'):
<DeepExtract>
java_type = None
if isinstance(self.tokens.look(), BasicType):
java_type = self.parse_basic_type()
elif isinstance(self.tokens.look(), Identifier):
java_type = self.parse_reference_type()
else:
self.illegal('Expected type')
java_type.dimensions = self.parse_array_dimension()
comparison_type = java_type
</DeepExtract>
parts.extend(('instanceof', comparison_type))
else:
<DeepExtract>
operator = self.accept(Operator)
if not operator in Operator.INFIX:
self.illegal('Expected infix operator')
if operator == '>' and self.try_accept('>'):
operator = '>>'
if self.try_accept('>'):
operator = '>>>'
operator = operator
</DeepExtract>
<DeepExtract>
prefix_operators = list()
while self.tokens.look().value in Operator.PREFIX:
prefix_operators.append(self.tokens.next().value)
if self.would_accept('('):
try:
with self.tokens:
lambda_exp = self.parse_lambda_expression()
if lambda_exp:
expression = lambda_exp
except JavaSyntaxError:
pass
try:
with self.tokens:
self.accept('(')
cast_target = self.parse_type()
self.accept(')')
expression = self.parse_expression_3()
expression = tree.Cast(type=cast_target, expression=expression)
except JavaSyntaxError:
pass
primary = self.parse_primary()
primary.prefix_operators = prefix_operators
primary.selectors = list()
primary.postfix_operators = list()
token = self.tokens.look()
while token.value in '[.':
selector = self.parse_selector()
primary.selectors.append(selector)
token = self.tokens.look()
while token.value in Operator.POSTFIX:
primary.postfix_operators.append(self.tokens.next().value)
token = self.tokens.look()
expression = primary
</DeepExtract>
parts.extend((operator, expression))
token = self.tokens.look()
return parts
|
@parse_debug
def parse_expression_2_rest(self):
parts = list()
token = self.tokens.look()
while token.value in Operator.INFIX or token.value == 'instanceof':
if self.try_accept('instanceof'):
java_type = None
if isinstance(self.tokens.look(), BasicType):
java_type = self.parse_basic_type()
elif isinstance(self.tokens.look(), Identifier):
java_type = self.parse_reference_type()
else:
self.illegal('Expected type')
java_type.dimensions = self.parse_array_dimension()
comparison_type = java_type
parts.extend(('instanceof', comparison_type))
else:
operator = self.accept(Operator)
if not operator in Operator.INFIX:
self.illegal('Expected infix operator')
if operator == '>' and self.try_accept('>'):
operator = '>>'
if self.try_accept('>'):
operator = '>>>'
operator = operator
prefix_operators = list()
while self.tokens.look().value in Operator.PREFIX:
prefix_operators.append(self.tokens.next().value)
if self.would_accept('('):
try:
with self.tokens:
lambda_exp = self.parse_lambda_expression()
if lambda_exp:
expression = lambda_exp
except JavaSyntaxError:
pass
try:
with self.tokens:
self.accept('(')
cast_target = self.parse_type()
self.accept(')')
expression = self.parse_expression_3()
expression = tree.Cast(type=cast_target, expression=expression)
except JavaSyntaxError:
pass
primary = self.parse_primary()
primary.prefix_operators = prefix_operators
primary.selectors = list()
primary.postfix_operators = list()
token = self.tokens.look()
while token.value in '[.':
selector = self.parse_selector()
primary.selectors.append(selector)
token = self.tokens.look()
while token.value in Operator.POSTFIX:
primary.postfix_operators.append(self.tokens.next().value)
token = self.tokens.look()
expression = primary
parts.extend((operator, expression))
token = self.tokens.look()
return parts
|
code-transformer
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
d_d = args[2]
n = args[3]
sc = extend_to_16_bits((d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res = 2147483647 & sc | (d_a & 65535) * (d_b & 65535) << n.value & (sc ^ 65535)
result = d_d + mul_res
c = 0
v = overflow(result)
av = advanced_overflow(result)
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
d_d = args[2]
n = args[3]
sc = extend_to_16_bits((d_a & 65535 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res = 2147483647 & sc | (d_a & 65535) * (d_b & 65535) << n.value & (sc ^ 65535)
result = d_d + mul_res
c = 0
v = overflow(result)
av = advanced_overflow(result)
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
return result
|
angr-platforms
|
positive
|
def write_smtlib(file: typing.TextIO, exprs: typing.List[Node]):
"""Write the given expressions to the given file object
Honor options to wrap lines or pretty-print."""
from . import options
if options.args().pretty_print:
for expr in exprs:
<DeepExtract>
visit = [(expr, False)]
indent = ''
while visit:
(ex, visited) = visit.pop()
if ex.is_leaf():
assert isinstance(ex.data, str)
if ex.data == '':
continue
if ex.data[0] == ';':
file.write(f'\n{ex.data}\n')
else:
file.write(f'{indent}{ex.data}\n')
continue
if visited:
indent = indent[:-2]
file.write(f'{indent})\n')
elif all(map(lambda n: n.is_leaf(), ex.data)):
file.write(f'{indent}{ex}\n')
else:
visit.append((ex, True))
if ex.has_ident():
file.write(f'{indent}({ex.data[0]}\n')
visit.extend(((x, False) for x in reversed(ex.data[1:])))
else:
file.write(f'{indent}(\n')
visit.extend(((x, False) for x in reversed(ex.data)))
indent += ' '
</DeepExtract>
else:
lines = [__write_smtlib_str(expr) for expr in exprs]
if options.args().wrap_lines:
lines = map(lambda line: textwrap.wrap(line, width=78, subsequent_indent=' '), lines)
lines = [sub for line in lines for sub in line]
for line in lines:
file.write(line)
file.write('\n')
|
def write_smtlib(file: typing.TextIO, exprs: typing.List[Node]):
"""Write the given expressions to the given file object
Honor options to wrap lines or pretty-print."""
from . import options
if options.args().pretty_print:
for expr in exprs:
visit = [(expr, False)]
indent = ''
while visit:
(ex, visited) = visit.pop()
if ex.is_leaf():
assert isinstance(ex.data, str)
if ex.data == '':
continue
if ex.data[0] == ';':
file.write(f'\n{ex.data}\n')
else:
file.write(f'{indent}{ex.data}\n')
continue
if visited:
indent = indent[:-2]
file.write(f'{indent})\n')
elif all(map(lambda n: n.is_leaf(), ex.data)):
file.write(f'{indent}{ex}\n')
else:
visit.append((ex, True))
if ex.has_ident():
file.write(f'{indent}({ex.data[0]}\n')
visit.extend(((x, False) for x in reversed(ex.data[1:])))
else:
file.write(f'{indent}(\n')
visit.extend(((x, False) for x in reversed(ex.data)))
indent += ' '
else:
lines = [__write_smtlib_str(expr) for expr in exprs]
if options.args().wrap_lines:
lines = map(lambda line: textwrap.wrap(line, width=78, subsequent_indent=' '), lines)
lines = [sub for line in lines for sub in line]
for line in lines:
file.write(line)
file.write('\n')
|
ddSMT
|
positive
|
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
<DeepExtract>
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for (i, module) in enumerate(modules):
for (j, m) in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
</DeepExtract>
return modules
data_parallel.replicate = new_replicate
|
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for (i, module) in enumerate(modules):
for (j, m) in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
return modules
data_parallel.replicate = new_replicate
|
DeepSEE
|
positive
|
def set_render_params(self, **kwargs):
<DeepExtract>
if isinstance(kwargs.get('outline'), str):
self.canvas.setStrokeColor(kwargs.get('outline'))
elif kwargs.get('outline'):
rgb = (kwargs.get('outline')[0] / 256.0, kwargs.get('outline')[1] / 256.0, kwargs.get('outline')[2] / 256.0)
self.canvas.setStrokeColorRGB(*rgb)
else:
self.set_stroke_color()
</DeepExtract>
<DeepExtract>
if isinstance(kwargs.get('fill', 'none'), str):
if kwargs.get('fill', 'none') != 'none':
self.canvas.setFillColor(kwargs.get('fill', 'none'))
elif kwargs.get('fill', 'none'):
rgb = (kwargs.get('fill', 'none')[0] / 256.0, kwargs.get('fill', 'none')[1] / 256.0, kwargs.get('fill', 'none')[2] / 256.0)
self.canvas.setFillColorRGB(*rgb)
else:
self.set_fill_color()
</DeepExtract>
<DeepExtract>
if kwargs.get('thick') is None:
kwargs.get('thick') = 1
if kwargs.get('style') == 'dotted':
self.canvas.setDash([2 * kwargs.get('thick'), 2 * kwargs.get('thick')])
elif kwargs.get('style') == 'dashed':
self.canvas.setDash([4 * kwargs.get('thick'), 4 * kwargs.get('thick')])
elif kwargs.get('style') == 'none':
self.canvas.setDash([0, 65535 * kwargs.get('thick')])
elif re.search('^\\d+(,\\d+)*$', kwargs.get('style') or ''):
self.canvas.setDash([int(n) * kwargs.get('thick') for n in kwargs.get('style').split(',')])
else:
self.canvas.setDash()
</DeepExtract>
params = {}
if kwargs.get('fill', 'none') == 'none':
params['fill'] = 0
else:
params['fill'] = 1
if kwargs.get('outline') is None:
params['stroke'] = 0
else:
params['stroke'] = 1
return params
|
def set_render_params(self, **kwargs):
if isinstance(kwargs.get('outline'), str):
self.canvas.setStrokeColor(kwargs.get('outline'))
elif kwargs.get('outline'):
rgb = (kwargs.get('outline')[0] / 256.0, kwargs.get('outline')[1] / 256.0, kwargs.get('outline')[2] / 256.0)
self.canvas.setStrokeColorRGB(*rgb)
else:
self.set_stroke_color()
if isinstance(kwargs.get('fill', 'none'), str):
if kwargs.get('fill', 'none') != 'none':
self.canvas.setFillColor(kwargs.get('fill', 'none'))
elif kwargs.get('fill', 'none'):
rgb = (kwargs.get('fill', 'none')[0] / 256.0, kwargs.get('fill', 'none')[1] / 256.0, kwargs.get('fill', 'none')[2] / 256.0)
self.canvas.setFillColorRGB(*rgb)
else:
self.set_fill_color()
if kwargs.get('thick') is None:
kwargs.get('thick') = 1
if kwargs.get('style') == 'dotted':
self.canvas.setDash([2 * kwargs.get('thick'), 2 * kwargs.get('thick')])
elif kwargs.get('style') == 'dashed':
self.canvas.setDash([4 * kwargs.get('thick'), 4 * kwargs.get('thick')])
elif kwargs.get('style') == 'none':
self.canvas.setDash([0, 65535 * kwargs.get('thick')])
elif re.search('^\\d+(,\\d+)*$', kwargs.get('style') or ''):
self.canvas.setDash([int(n) * kwargs.get('thick') for n in kwargs.get('style').split(',')])
else:
self.canvas.setDash()
params = {}
if kwargs.get('fill', 'none') == 'none':
params['fill'] = 0
else:
params['fill'] = 1
if kwargs.get('outline') is None:
params['stroke'] = 0
else:
params['stroke'] = 1
return params
|
blockdiag
|
positive
|
def view(self):
self.current_index = 0
if self.out_dir is None:
<DeepExtract>
dpi = 80
w = 16
h = 9
self.fig = plt.figure(figsize=(w, h), dpi=dpi)
self.ax = self.fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
if len(self.image_paths) > 1:
plt.connect('key_release_event', self.next_image)
self.show_image()
plt.show()
</DeepExtract>
else:
<DeepExtract>
dpi = 80
w = 16
h = 9
self.fig = plt.figure(figsize=(w, h), dpi=dpi)
self.ax = self.fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
out_paths = []
for i in range(len(self.image_paths)):
self.current_index = i
out_name = splitext(split(self.image_paths[i])[1])[0] + '.png'
out_path = join(self.out_dir, out_name)
if self.show_image():
self.fig.savefig(out_path, dpi=dpi)
out_paths.append(out_path)
if self.with_post:
print('Post-processing')
p = Pool(10)
if self.instance_mode:
p.map(convert_instance_rgb, out_paths)
if self.drivable_mode:
p = Pool(10)
p.map(convert_drivable_rgb, out_paths)
</DeepExtract>
|
def view(self):
self.current_index = 0
if self.out_dir is None:
dpi = 80
w = 16
h = 9
self.fig = plt.figure(figsize=(w, h), dpi=dpi)
self.ax = self.fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
if len(self.image_paths) > 1:
plt.connect('key_release_event', self.next_image)
self.show_image()
plt.show()
else:
dpi = 80
w = 16
h = 9
self.fig = plt.figure(figsize=(w, h), dpi=dpi)
self.ax = self.fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
out_paths = []
for i in range(len(self.image_paths)):
self.current_index = i
out_name = splitext(split(self.image_paths[i])[1])[0] + '.png'
out_path = join(self.out_dir, out_name)
if self.show_image():
self.fig.savefig(out_path, dpi=dpi)
out_paths.append(out_path)
if self.with_post:
print('Post-processing')
p = Pool(10)
if self.instance_mode:
p.map(convert_instance_rgb, out_paths)
if self.drivable_mode:
p = Pool(10)
p.map(convert_drivable_rgb, out_paths)
|
3d-vehicle-tracking
|
positive
|
def run_first_stage(image, net, scale, threshold):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
image: an instance of PIL.Image.
net: an instance of pytorch's nn.Module, P-Net.
scale: a float number,
scale width and height of the image by this number.
threshold: a float number,
threshold on the probability of a face when generating
bounding boxes from predictions of the net.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
(width, height) = image.size
(sw, sh) = (math.ceil(width * scale), math.ceil(height * scale))
img = image.resize((sw, sh), Image.BILINEAR)
img = np.asarray(img, 'float32')
with torch.no_grad():
img = Variable(torch.FloatTensor(_preprocess(img)), volatile=True)
output = net(img.to('cuda'))
probs = output[1].cpu().data.numpy()[0, 1, :, :]
offsets = output[0].cpu().data.numpy()
<DeepExtract>
stride = 2
cell_size = 12
inds = np.where(probs > threshold)
if inds[0].size == 0:
boxes = np.array([])
(tx1, ty1, tx2, ty2) = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
offsets = np.array([tx1, ty1, tx2, ty2])
score = probs[inds[0], inds[1]]
bounding_boxes = np.vstack([np.round((stride * inds[1] + 1.0) / scale), np.round((stride * inds[0] + 1.0) / scale), np.round((stride * inds[1] + 1.0 + cell_size) / scale), np.round((stride * inds[0] + 1.0 + cell_size) / scale), score, offsets])
boxes = bounding_boxes.T
</DeepExtract>
if len(boxes) == 0:
return None
keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
return boxes[keep]
|
def run_first_stage(image, net, scale, threshold):
"""Run P-Net, generate bounding boxes, and do NMS.
Arguments:
image: an instance of PIL.Image.
net: an instance of pytorch's nn.Module, P-Net.
scale: a float number,
scale width and height of the image by this number.
threshold: a float number,
threshold on the probability of a face when generating
bounding boxes from predictions of the net.
Returns:
a float numpy array of shape [n_boxes, 9],
bounding boxes with scores and offsets (4 + 1 + 4).
"""
(width, height) = image.size
(sw, sh) = (math.ceil(width * scale), math.ceil(height * scale))
img = image.resize((sw, sh), Image.BILINEAR)
img = np.asarray(img, 'float32')
with torch.no_grad():
img = Variable(torch.FloatTensor(_preprocess(img)), volatile=True)
output = net(img.to('cuda'))
probs = output[1].cpu().data.numpy()[0, 1, :, :]
offsets = output[0].cpu().data.numpy()
stride = 2
cell_size = 12
inds = np.where(probs > threshold)
if inds[0].size == 0:
boxes = np.array([])
(tx1, ty1, tx2, ty2) = [offsets[0, i, inds[0], inds[1]] for i in range(4)]
offsets = np.array([tx1, ty1, tx2, ty2])
score = probs[inds[0], inds[1]]
bounding_boxes = np.vstack([np.round((stride * inds[1] + 1.0) / scale), np.round((stride * inds[0] + 1.0) / scale), np.round((stride * inds[1] + 1.0 + cell_size) / scale), np.round((stride * inds[0] + 1.0 + cell_size) / scale), score, offsets])
boxes = bounding_boxes.T
if len(boxes) == 0:
return None
keep = nms(boxes[:, 0:5], overlap_threshold=0.5)
return boxes[keep]
|
Cross-Resolution-Face-Recognition
|
positive
|
def _gaussian_acquisition(X, model, y_opt=None, acq_func='LCB', return_grad=False, acq_func_kwargs=None):
"""
Wrapper so that the output of this function can be
directly passed to a minimizer.
"""
X = np.asarray(X)
if X.ndim != 2:
raise ValueError('X is {}-dimensional, however, it must be 2-dimensional.'.format(X.ndim))
if acq_func_kwargs is None:
acq_func_kwargs = dict()
xi = acq_func_kwargs.get('xi', 0.01)
kappa = acq_func_kwargs.get('kappa', 1.96)
per_second = acq_func.endswith('ps')
if per_second:
(model, time_model) = model.estimators_
if acq_func == 'LCB':
<DeepExtract>
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
if kappa == 'inf':
func_and_grad = (-std, -std_grad)
func_and_grad = (mu - kappa * std, mu_grad - kappa * std_grad)
else:
(mu, std) = model.predict(X, return_std=True)
if kappa == 'inf':
func_and_grad = -std
func_and_grad = mu - kappa * std
</DeepExtract>
if return_grad:
(acq_vals, acq_grad) = func_and_grad
else:
acq_vals = func_and_grad
elif acq_func in ['EI', 'PI', 'EIps', 'PIps']:
if acq_func in ['EI', 'EIps']:
<DeepExtract>
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = model.predict(X, return_std=True)
if mu.ndim != 1 or std.ndim != 1:
raise ValueError('mu and std are {}-dimensional and {}-dimensional, however both must be 1-dimensional. Did you train your model with an (N, 1) vector instead of an (N,) vector?'.format(mu.ndim, std.ndim))
values = np.zeros_like(mu)
mask = std > 0
improve = y_opt - xi - mu[mask]
scaled = improve / std[mask]
cdf = norm.cdf(scaled)
pdf = norm.pdf(scaled)
exploit = improve * cdf
explore = std[mask] * pdf
values[mask] = exploit + explore
if return_grad:
if not np.all(mask):
func_and_grad = (values, np.zeros_like(std_grad))
improve_grad = -mu_grad * std - std_grad * improve
improve_grad /= std ** 2
cdf_grad = improve_grad * pdf
pdf_grad = -improve * cdf_grad
exploit_grad = -mu_grad * cdf - pdf_grad
explore_grad = std_grad * pdf + pdf_grad
grad = exploit_grad + explore_grad
func_and_grad = (values, grad)
func_and_grad = values
</DeepExtract>
else:
<DeepExtract>
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = model.predict(X, return_std=True)
if mu.ndim != 1 or std.ndim != 1:
raise ValueError('mu and std are {}-dimensional and {}-dimensional, however both must be 1-dimensional. Did you train your model with an (N, 1) vector instead of an (N,) vector?'.format(mu.ndim, std.ndim))
values = np.zeros_like(mu)
mask = std > 0
improve = y_opt - xi - mu[mask]
scaled = improve / std[mask]
values[mask] = norm.cdf(scaled)
if return_grad:
if not np.all(mask):
func_and_grad = (values, np.zeros_like(std_grad))
improve_grad = -mu_grad * std - std_grad * improve
improve_grad /= std ** 2
func_and_grad = (values, improve_grad * norm.pdf(scaled))
func_and_grad = values
</DeepExtract>
if return_grad:
acq_vals = -func_and_grad[0]
acq_grad = -func_and_grad[1]
else:
acq_vals = -func_and_grad
if acq_func in ['EIps', 'PIps']:
if return_grad:
(mu, std, mu_grad, std_grad) = time_model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = time_model.predict(X, return_std=True)
inv_t = np.exp(-mu + 0.5 * std ** 2)
acq_vals *= inv_t
if return_grad:
acq_grad *= inv_t
acq_grad += acq_vals * (-mu_grad + std * std_grad)
else:
raise ValueError('Acquisition function not implemented.')
if return_grad:
return (acq_vals, acq_grad)
return acq_vals
|
def _gaussian_acquisition(X, model, y_opt=None, acq_func='LCB', return_grad=False, acq_func_kwargs=None):
"""
Wrapper so that the output of this function can be
directly passed to a minimizer.
"""
X = np.asarray(X)
if X.ndim != 2:
raise ValueError('X is {}-dimensional, however, it must be 2-dimensional.'.format(X.ndim))
if acq_func_kwargs is None:
acq_func_kwargs = dict()
xi = acq_func_kwargs.get('xi', 0.01)
kappa = acq_func_kwargs.get('kappa', 1.96)
per_second = acq_func.endswith('ps')
if per_second:
(model, time_model) = model.estimators_
if acq_func == 'LCB':
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
if kappa == 'inf':
func_and_grad = (-std, -std_grad)
func_and_grad = (mu - kappa * std, mu_grad - kappa * std_grad)
else:
(mu, std) = model.predict(X, return_std=True)
if kappa == 'inf':
func_and_grad = -std
func_and_grad = mu - kappa * std
if return_grad:
(acq_vals, acq_grad) = func_and_grad
else:
acq_vals = func_and_grad
elif acq_func in ['EI', 'PI', 'EIps', 'PIps']:
if acq_func in ['EI', 'EIps']:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = model.predict(X, return_std=True)
if mu.ndim != 1 or std.ndim != 1:
raise ValueError('mu and std are {}-dimensional and {}-dimensional, however both must be 1-dimensional. Did you train your model with an (N, 1) vector instead of an (N,) vector?'.format(mu.ndim, std.ndim))
values = np.zeros_like(mu)
mask = std > 0
improve = y_opt - xi - mu[mask]
scaled = improve / std[mask]
cdf = norm.cdf(scaled)
pdf = norm.pdf(scaled)
exploit = improve * cdf
explore = std[mask] * pdf
values[mask] = exploit + explore
if return_grad:
if not np.all(mask):
func_and_grad = (values, np.zeros_like(std_grad))
improve_grad = -mu_grad * std - std_grad * improve
improve_grad /= std ** 2
cdf_grad = improve_grad * pdf
pdf_grad = -improve * cdf_grad
exploit_grad = -mu_grad * cdf - pdf_grad
explore_grad = std_grad * pdf + pdf_grad
grad = exploit_grad + explore_grad
func_and_grad = (values, grad)
func_and_grad = values
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if return_grad:
(mu, std, mu_grad, std_grad) = model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = model.predict(X, return_std=True)
if mu.ndim != 1 or std.ndim != 1:
raise ValueError('mu and std are {}-dimensional and {}-dimensional, however both must be 1-dimensional. Did you train your model with an (N, 1) vector instead of an (N,) vector?'.format(mu.ndim, std.ndim))
values = np.zeros_like(mu)
mask = std > 0
improve = y_opt - xi - mu[mask]
scaled = improve / std[mask]
values[mask] = norm.cdf(scaled)
if return_grad:
if not np.all(mask):
func_and_grad = (values, np.zeros_like(std_grad))
improve_grad = -mu_grad * std - std_grad * improve
improve_grad /= std ** 2
func_and_grad = (values, improve_grad * norm.pdf(scaled))
func_and_grad = values
if return_grad:
acq_vals = -func_and_grad[0]
acq_grad = -func_and_grad[1]
else:
acq_vals = -func_and_grad
if acq_func in ['EIps', 'PIps']:
if return_grad:
(mu, std, mu_grad, std_grad) = time_model.predict(X, return_std=True, return_mean_grad=True, return_std_grad=True)
else:
(mu, std) = time_model.predict(X, return_std=True)
inv_t = np.exp(-mu + 0.5 * std ** 2)
acq_vals *= inv_t
if return_grad:
acq_grad *= inv_t
acq_grad += acq_vals * (-mu_grad + std * std_grad)
else:
raise ValueError('Acquisition function not implemented.')
if return_grad:
return (acq_vals, acq_grad)
return acq_vals
|
deephyper
|
positive
|
def _load_segment(f, offset):
"""
Handles LC_SEGMENT(_64) commands
:param f: input file
:param offset: starting offset of the LC_SEGMENT command
:return:
"""
is64 = self.arch.bits == 64
if not is64:
segment_s_size = 56
<DeepExtract>
(_, _, segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags) = self._unpack_with_byteorder('2I16s8I', self._read(f, offset, segment_s_size))
</DeepExtract>
else:
segment_s_size = 72
<DeepExtract>
(_, _, segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags) = self._unpack_with_byteorder('2I16s4Q4I', self._read(f, offset, segment_s_size))
</DeepExtract>
segname = segname.replace(b'\x00', b'')
log.debug('Processing segment %r', segname)
seg = MachOSegment(fileoff, vmaddr, filesize, vmsize, segname, nsects, [], flags, initprot, maxprot)
if not is64:
section_s_size = 68
section_s_packstr = '16s16s9I'
else:
section_s_size = 80
section_s_packstr = '16s16s2Q6IQ'
section_start = offset + segment_s_size
for i in range(0, nsects):
log.debug('Processing section # %d in %r', i + 1, segname)
<DeepExtract>
(section_sectname, section_segname, section_vaddr, section_vsize, section_foff, section_align, section_reloff, section_nreloc, section_flags, r1, r2) = self._unpack_with_byteorder(section_s_packstr, self._read(f, i * section_s_size + section_start, section_s_size))
</DeepExtract>
section_sectname = section_sectname.replace(b'\x00', b'')
section_segname = section_segname.replace(b'\x00', b'')
sec = MachOSection(section_foff, section_vaddr, section_vsize, section_vsize, section_segname, section_sectname, section_align, section_reloff, section_nreloc, section_flags, r1, r2, parent_segment=seg)
seg.sections.append(sec)
self.sections.append(sec)
self.sections_by_ordinal.extend(seg.sections)
if segname == b'__PAGEZERO':
log.info('Found PAGEZERO, skipping backer for memory conservation')
page_zero = UninitializedClemory(arch=self.arch, size=seg.memsize)
self.memory.add_backer(seg.vaddr, page_zero)
elif seg.filesize > 0:
<DeepExtract>
f.seek(seg.offset)
blob = f.read(seg.filesize)
</DeepExtract>
if seg.filesize < seg.memsize:
blob += b'\x00' * (seg.memsize - seg.filesize)
vaddr_offset = AT.from_mva(seg.vaddr, self).to_rva()
self.memory.add_backer(vaddr_offset, blob)
self.segments.append(seg)
|
def _load_segment(f, offset):
"""
Handles LC_SEGMENT(_64) commands
:param f: input file
:param offset: starting offset of the LC_SEGMENT command
:return:
"""
is64 = self.arch.bits == 64
if not is64:
segment_s_size = 56
(_, _, segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags) = self._unpack_with_byteorder('2I16s8I', self._read(f, offset, segment_s_size))
else:
segment_s_size = 72
(_, _, segname, vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags) = self._unpack_with_byteorder('2I16s4Q4I', self._read(f, offset, segment_s_size))
segname = segname.replace(b'\x00', b'')
log.debug('Processing segment %r', segname)
seg = MachOSegment(fileoff, vmaddr, filesize, vmsize, segname, nsects, [], flags, initprot, maxprot)
if not is64:
section_s_size = 68
section_s_packstr = '16s16s9I'
else:
section_s_size = 80
section_s_packstr = '16s16s2Q6IQ'
section_start = offset + segment_s_size
for i in range(0, nsects):
log.debug('Processing section # %d in %r', i + 1, segname)
(section_sectname, section_segname, section_vaddr, section_vsize, section_foff, section_align, section_reloff, section_nreloc, section_flags, r1, r2) = self._unpack_with_byteorder(section_s_packstr, self._read(f, i * section_s_size + section_start, section_s_size))
section_sectname = section_sectname.replace(b'\x00', b'')
section_segname = section_segname.replace(b'\x00', b'')
sec = MachOSection(section_foff, section_vaddr, section_vsize, section_vsize, section_segname, section_sectname, section_align, section_reloff, section_nreloc, section_flags, r1, r2, parent_segment=seg)
seg.sections.append(sec)
self.sections.append(sec)
self.sections_by_ordinal.extend(seg.sections)
if segname == b'__PAGEZERO':
log.info('Found PAGEZERO, skipping backer for memory conservation')
page_zero = UninitializedClemory(arch=self.arch, size=seg.memsize)
self.memory.add_backer(seg.vaddr, page_zero)
elif seg.filesize > 0:
f.seek(seg.offset)
blob = f.read(seg.filesize)
if seg.filesize < seg.memsize:
blob += b'\x00' * (seg.memsize - seg.filesize)
vaddr_offset = AT.from_mva(seg.vaddr, self).to_rva()
self.memory.add_backer(vaddr_offset, blob)
self.segments.append(seg)
|
cle
|
positive
|
def forecast_outsample(self):
<DeepExtract>
(dataframe, freq) = parse_data(self.df)
(models, _) = train_models(dataframe['Target'], models=self.model_list, forecast_len=self.forecast_len, full_df=dataframe, seasonality=self.season, in_sample=False, freq=freq, GPU=self.GPU)
(models_dict, freq, dataframe) = (models, freq, dataframe['Target'])
</DeepExtract>
self.models_dict_out = models_dict
self.freq = freq
<DeepExtract>
global fb
forecast_dict = {}
for (name, model) in models_dict.items():
if False:
print('Model {} is being used to forcast in sample'.format(name))
else:
print('Model {} is being used to forcast out of sample'.format(name))
if name == 'ARIMA':
forecast_dict[name] = model.predict(self.forecast_len)
if name == 'Prophet':
future = model.make_future_dataframe(periods=self.forecast_len, freq=freq)
future_pred = model.predict(future)
forecast_dict[name] = original_dataframe(future_pred, freq)[-self.forecast_len:]
fb = original_dataframe(future_pred, freq)[-self.forecast_len:]
if name == 'HWAAS':
forecast_dict[name] = model.forecast(self.forecast_len)
if name in ['HWAMS', 'HWAS']:
forecast_dict[name] = model.forecast(self.forecast_len)
if name == 'PYAF':
forecast_dict[name] = model['Target_Forecast'][-self.forecast_len:].values
if name == 'Gluonts':
if freq == 'MS':
freq = 'M'
if False:
for (df_entry, forecast) in zip(gluonts_dataframe(dataframe), model.predict(gluonts_dataframe(dataframe))):
forecast_dict[name] = forecast.samples.mean(axis=0)
else:
future = ListDataset([{'target': dataframe[-self.forecast_len:], 'start': dataframe.index[-1] + dataframe.index.to_series().diff().min()}], freq=freq)
for (df_entry, forecast) in zip(future, model.predict(future)):
forecast_dict[name] = forecast.samples.mean(axis=0)
if name == 'NBEATS':
if False:
net = model['model']
x_test = model['x_test']
y_test = model['y_test']
norm_constant = model['constant']
net.eval()
(_, forecast) = net(torch.tensor(x_test, dtype=torch.float))
if self.GPU:
p = forecast.cpu().detach().numpy()
else:
p = forecast.detach().numpy()
forecast_dict[name] = p[-1] * norm_constant
else:
net = model['model']
net.eval()
(x_train, y_train, net, norm_constant) = model['tuple']
(_, forecast) = net(torch.tensor(x_train, dtype=torch.float))
if self.GPU:
p = forecast.cpu().detach().numpy()
else:
p = forecast.detach().numpy()
forecast_dict[name] = p[-1] * norm_constant
if name in ['TBA', 'TATS', 'TBAT', 'TBATS1', 'TBATS2', 'TBATS3', 'TBATP1']:
forecast_dict[name] = model.forecast(self.forecast_len)
forecast_dict = forecast_dict
</DeepExtract>
index = pd.date_range(dataframe.index[-1], periods=self.forecast_len + 1, freq=freq)[1:]
<DeepExtract>
ra = -1
for (name, forecast) in forecast_dict.items():
ra += 1
if ra == 0:
outsample = pd.DataFrame(data=forecast, columns=[name], index=index)
outsample[name] = forecast
else:
outsample[name] = forecast
forecast_frame = outsample
</DeepExtract>
print('Successfully finished out of sample forecast')
return forecast_frame
|
def forecast_outsample(self):
(dataframe, freq) = parse_data(self.df)
(models, _) = train_models(dataframe['Target'], models=self.model_list, forecast_len=self.forecast_len, full_df=dataframe, seasonality=self.season, in_sample=False, freq=freq, GPU=self.GPU)
(models_dict, freq, dataframe) = (models, freq, dataframe['Target'])
self.models_dict_out = models_dict
self.freq = freq
global fb
forecast_dict = {}
for (name, model) in models_dict.items():
if False:
print('Model {} is being used to forcast in sample'.format(name))
else:
print('Model {} is being used to forcast out of sample'.format(name))
if name == 'ARIMA':
forecast_dict[name] = model.predict(self.forecast_len)
if name == 'Prophet':
future = model.make_future_dataframe(periods=self.forecast_len, freq=freq)
future_pred = model.predict(future)
forecast_dict[name] = original_dataframe(future_pred, freq)[-self.forecast_len:]
fb = original_dataframe(future_pred, freq)[-self.forecast_len:]
if name == 'HWAAS':
forecast_dict[name] = model.forecast(self.forecast_len)
if name in ['HWAMS', 'HWAS']:
forecast_dict[name] = model.forecast(self.forecast_len)
if name == 'PYAF':
forecast_dict[name] = model['Target_Forecast'][-self.forecast_len:].values
if name == 'Gluonts':
if freq == 'MS':
freq = 'M'
if False:
for (df_entry, forecast) in zip(gluonts_dataframe(dataframe), model.predict(gluonts_dataframe(dataframe))):
forecast_dict[name] = forecast.samples.mean(axis=0)
else:
future = ListDataset([{'target': dataframe[-self.forecast_len:], 'start': dataframe.index[-1] + dataframe.index.to_series().diff().min()}], freq=freq)
for (df_entry, forecast) in zip(future, model.predict(future)):
forecast_dict[name] = forecast.samples.mean(axis=0)
if name == 'NBEATS':
if False:
net = model['model']
x_test = model['x_test']
y_test = model['y_test']
norm_constant = model['constant']
net.eval()
(_, forecast) = net(torch.tensor(x_test, dtype=torch.float))
if self.GPU:
p = forecast.cpu().detach().numpy()
else:
p = forecast.detach().numpy()
forecast_dict[name] = p[-1] * norm_constant
else:
net = model['model']
net.eval()
(x_train, y_train, net, norm_constant) = model['tuple']
(_, forecast) = net(torch.tensor(x_train, dtype=torch.float))
if self.GPU:
p = forecast.cpu().detach().numpy()
else:
p = forecast.detach().numpy()
forecast_dict[name] = p[-1] * norm_constant
if name in ['TBA', 'TATS', 'TBAT', 'TBATS1', 'TBATS2', 'TBATS3', 'TBATP1']:
forecast_dict[name] = model.forecast(self.forecast_len)
forecast_dict = forecast_dict
index = pd.date_range(dataframe.index[-1], periods=self.forecast_len + 1, freq=freq)[1:]
ra = -1
for (name, forecast) in forecast_dict.items():
ra += 1
if ra == 0:
outsample = pd.DataFrame(data=forecast, columns=[name], index=index)
outsample[name] = forecast
else:
outsample[name] = forecast
forecast_frame = outsample
print('Successfully finished out of sample forecast')
return forecast_frame
|
atspy
|
positive
|
def flag_low_snr(self, snr_cut=3, output='kept'):
"""Flag low snr data points
Args:
snr_cut (float): remove points with snr lower than this
output (str): returns 'kept', 'flagged', or 'both' (a dictionary)
Returns:
(Obsdata): a observation object with flagged data points removed
"""
mask = self.unpack('snr')['snr'] > snr_cut
datatable_kept = self.data.copy()
datatable_flagged = self.data.copy()
datatable_kept = datatable_kept[mask]
datatable_flagged = datatable_flagged[np.invert(mask)]
print('snr flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))
<DeepExtract>
newobs = copy.deepcopy(self)
obs_kept = newobs
</DeepExtract>
<DeepExtract>
newobs = copy.deepcopy(self)
obs_flagged = newobs
</DeepExtract>
obs_kept.data = datatable_kept
obs_flagged.data = datatable_flagged
if output == 'flagged':
return obs_flagged
elif output == 'both':
return {'kept': obs_kept, 'flagged': obs_flagged}
else:
return obs_kept
|
def flag_low_snr(self, snr_cut=3, output='kept'):
"""Flag low snr data points
Args:
snr_cut (float): remove points with snr lower than this
output (str): returns 'kept', 'flagged', or 'both' (a dictionary)
Returns:
(Obsdata): a observation object with flagged data points removed
"""
mask = self.unpack('snr')['snr'] > snr_cut
datatable_kept = self.data.copy()
datatable_flagged = self.data.copy()
datatable_kept = datatable_kept[mask]
datatable_flagged = datatable_flagged[np.invert(mask)]
print('snr flagged %d/%d visibilities' % (len(datatable_flagged), len(self.data)))
newobs = copy.deepcopy(self)
obs_kept = newobs
newobs = copy.deepcopy(self)
obs_flagged = newobs
obs_kept.data = datatable_kept
obs_flagged.data = datatable_flagged
if output == 'flagged':
return obs_flagged
elif output == 'both':
return {'kept': obs_kept, 'flagged': obs_flagged}
else:
return obs_kept
|
eht-imaging
|
positive
|
@pytest.mark.skip('Fail Github Action')
@pytest.mark.parametrize('dtype,dim', itertools.product(numerical_data_type(), dimensions()))
def test_hier_neighbor_allreduce_fusion(hier_setup, dtype, dim):
(rank, size, local_rank, local_size) = hier_setup
machine_rank = (rank - local_rank) // local_size
machine_size = size // local_size
neighbor_ranks = bf.in_neighbor_machine_ranks()
expected_value = (machine_rank + sum(neighbor_ranks)) / (len(neighbor_ranks) + 1)
K = 50
(tensor_list, handles, names) = ([], [], [])
for i in range(K):
tensor = torch.FloatTensor(*[23] * dim).fill_(i + (rank - (local_size - 1) / 2.0) / local_size)
<DeepExtract>
if dtype.is_cuda:
if bf.nccl_built() and bf.local_size() > torch.cuda.device_count():
raise EnvironmentError('Cannot run number of processes in one machine more than GPU device count in NCCL environment')
tensor = tensor.cuda(bf.rank() % torch.cuda.device_count()).type(dtype)
tensor = tensor.type(dtype)
</DeepExtract>
tensor_list.append(tensor)
names.append('index{}_{}_{}'.format(i, dtype, dim))
for i in range(K):
handle = bf.hierarchical_neighbor_allreduce_nonblocking(tensor_list[i], name=names[i])
handles.append(handle)
outputs = []
for i in range(K):
output = bf.synchronize(handles[i])
outputs.append(output)
for i in range(K):
assert list(outputs[i].shape) == [23] * dim, f'{names[i]} (hierarchical neighbor allreduce fusion) produces incorrect reduced shape'
assert (outputs[i] - expected_value - i).abs().max() < EPSILON, f'{names[i]} (hierarchical neighbor allreduce fusion) produces incorrect reduced tensor when K = {i}'
|
@pytest.mark.skip('Fail Github Action')
@pytest.mark.parametrize('dtype,dim', itertools.product(numerical_data_type(), dimensions()))
def test_hier_neighbor_allreduce_fusion(hier_setup, dtype, dim):
(rank, size, local_rank, local_size) = hier_setup
machine_rank = (rank - local_rank) // local_size
machine_size = size // local_size
neighbor_ranks = bf.in_neighbor_machine_ranks()
expected_value = (machine_rank + sum(neighbor_ranks)) / (len(neighbor_ranks) + 1)
K = 50
(tensor_list, handles, names) = ([], [], [])
for i in range(K):
tensor = torch.FloatTensor(*[23] * dim).fill_(i + (rank - (local_size - 1) / 2.0) / local_size)
if dtype.is_cuda:
if bf.nccl_built() and bf.local_size() > torch.cuda.device_count():
raise EnvironmentError('Cannot run number of processes in one machine more than GPU device count in NCCL environment')
tensor = tensor.cuda(bf.rank() % torch.cuda.device_count()).type(dtype)
tensor = tensor.type(dtype)
tensor_list.append(tensor)
names.append('index{}_{}_{}'.format(i, dtype, dim))
for i in range(K):
handle = bf.hierarchical_neighbor_allreduce_nonblocking(tensor_list[i], name=names[i])
handles.append(handle)
outputs = []
for i in range(K):
output = bf.synchronize(handles[i])
outputs.append(output)
for i in range(K):
assert list(outputs[i].shape) == [23] * dim, f'{names[i]} (hierarchical neighbor allreduce fusion) produces incorrect reduced shape'
assert (outputs[i] - expected_value - i).abs().max() < EPSILON, f'{names[i]} (hierarchical neighbor allreduce fusion) produces incorrect reduced tensor when K = {i}'
|
bluefog
|
positive
|
def step_driver_online_offline_nodewise(self):
""" node wise control driver online offline
:return:
"""
moment = self.city_time % self.n_intervals
cur_idle_num = self.idle_driver_location_mat[moment]
self.all_grids_on_number = 0
self.all_grids_off_number = 0
new_drivers = 0
offline_drivers = 0
for (idx, target_node_id) in enumerate(self.target_node_ids):
on_off_number = cur_idle_num[idx] - self.nodes[target_node_id].idle_driver_num
if on_off_number > 0:
new_drivers += on_off_number
<DeepExtract>
while on_off_number > 0:
if self.nodes[target_node_id].offline_driver_num > 0:
self.nodes[target_node_id].set_offline_driver_online()
self.n_drivers += 1
self.n_offline_drivers -= 1
else:
n_total_drivers = len(self.drivers.keys())
added_driver_id = n_total_drivers
self.drivers[added_driver_id] = Driver(added_driver_id, self._time_limit)
self.drivers[added_driver_id].set_position(self.nodes[target_node_id])
self.nodes[target_node_id].add_driver(added_driver_id, self.drivers[added_driver_id])
self.n_drivers += 1
on_off_number -= 1
</DeepExtract>
self.all_grids_on_number += on_off_number
elif on_off_number < 0:
offline_drivers += on_off_number
<DeepExtract>
while abs(on_off_number) > 0:
if self.nodes[target_node_id].idle_driver_num > 0:
self.nodes[target_node_id].set_idle_driver_offline_random()
self.n_drivers -= 1
self.n_offline_drivers += 1
abs(on_off_number) -= 1
self.all_grids_off_number += 1
else:
break
</DeepExtract>
else:
pass
|
def step_driver_online_offline_nodewise(self):
""" node wise control driver online offline
:return:
"""
moment = self.city_time % self.n_intervals
cur_idle_num = self.idle_driver_location_mat[moment]
self.all_grids_on_number = 0
self.all_grids_off_number = 0
new_drivers = 0
offline_drivers = 0
for (idx, target_node_id) in enumerate(self.target_node_ids):
on_off_number = cur_idle_num[idx] - self.nodes[target_node_id].idle_driver_num
if on_off_number > 0:
new_drivers += on_off_number
while on_off_number > 0:
if self.nodes[target_node_id].offline_driver_num > 0:
self.nodes[target_node_id].set_offline_driver_online()
self.n_drivers += 1
self.n_offline_drivers -= 1
else:
n_total_drivers = len(self.drivers.keys())
added_driver_id = n_total_drivers
self.drivers[added_driver_id] = Driver(added_driver_id, self._time_limit)
self.drivers[added_driver_id].set_position(self.nodes[target_node_id])
self.nodes[target_node_id].add_driver(added_driver_id, self.drivers[added_driver_id])
self.n_drivers += 1
on_off_number -= 1
self.all_grids_on_number += on_off_number
elif on_off_number < 0:
offline_drivers += on_off_number
while abs(on_off_number) > 0:
if self.nodes[target_node_id].idle_driver_num > 0:
self.nodes[target_node_id].set_idle_driver_offline_random()
self.n_drivers -= 1
self.n_offline_drivers += 1
abs(on_off_number) -= 1
self.all_grids_off_number += 1
else:
break
else:
pass
|
CoRide
|
positive
|
def __init__(self, alpha=1.0, num_classes=10):
super(MobileNet_shallow, self).__init__()
self.conv1 = nn.Conv2d(3, int(32 * alpha), kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * alpha))
<DeepExtract>
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(int(32 * alpha), int(out_planes * alpha), stride))
32 = out_planes
self.layers = nn.Sequential(*layers)
</DeepExtract>
self.linear = nn.Linear(int(1024 * alpha), num_classes)
|
def __init__(self, alpha=1.0, num_classes=10):
super(MobileNet_shallow, self).__init__()
self.conv1 = nn.Conv2d(3, int(32 * alpha), kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(int(32 * alpha))
layers = []
for x in self.cfg:
out_planes = x if isinstance(x, int) else x[0]
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(int(32 * alpha), int(out_planes * alpha), stride))
32 = out_planes
self.layers = nn.Sequential(*layers)
self.linear = nn.Linear(int(1024 * alpha), num_classes)
|
ATMC
|
positive
|
def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = []
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
resp.close()
url = resp.headers['location']
method = req.method
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
parsed = urlparse(url)
url = parsed.geturl()
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
if resp.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
<DeepExtract>
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and (not should_bypass_proxies(url)):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
(username, password) = get_auth_from_url(new_proxies[scheme])
except KeyError:
(username, password) = (None, None)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
proxies = new_proxies
</DeepExtract>
<DeepExtract>
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
original_parsed = urlparse(resp.request.url)
redirect_parsed = urlparse(url)
if original_parsed.hostname != redirect_parsed.hostname:
del headers['Authorization']
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
</DeepExtract>
req = prepared_request
<DeepExtract>
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
if not isinstance(req, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
checked_urls = set()
while req.url in self.redirect_cache:
checked_urls.add(req.url)
new_url = self.redirect_cache.get(req.url)
if new_url in checked_urls:
break
req.url = new_url
False = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = req.hooks
adapter = self.get_adapter(url=req.url)
start = datetime.utcnow()
r = adapter.send(req, **kwargs)
r.elapsed = datetime.utcnow() - start
r = dispatch_hook('response', hooks, r, **kwargs)
if r.history:
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, req, r.raw)
gen = self.resolve_redirects(r, req, **kwargs)
history = [resp for resp in gen] if False else []
if history:
history.insert(0, r)
r = history.pop()
r.history = history
if not stream:
r.content
resp = r
</DeepExtract>
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
|
def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = []
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
resp.close()
url = resp.headers['location']
method = req.method
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
parsed = urlparse(url)
url = parsed.geturl()
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
if resp.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and (not should_bypass_proxies(url)):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
(username, password) = get_auth_from_url(new_proxies[scheme])
except KeyError:
(username, password) = (None, None)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
proxies = new_proxies
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
original_parsed = urlparse(resp.request.url)
redirect_parsed = urlparse(url)
if original_parsed.hostname != redirect_parsed.hostname:
del headers['Authorization']
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
req = prepared_request
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
if not isinstance(req, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
checked_urls = set()
while req.url in self.redirect_cache:
checked_urls.add(req.url)
new_url = self.redirect_cache.get(req.url)
if new_url in checked_urls:
break
req.url = new_url
False = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = req.hooks
adapter = self.get_adapter(url=req.url)
start = datetime.utcnow()
r = adapter.send(req, **kwargs)
r.elapsed = datetime.utcnow() - start
r = dispatch_hook('response', hooks, r, **kwargs)
if r.history:
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, req, r.raw)
gen = self.resolve_redirects(r, req, **kwargs)
history = [resp for resp in gen] if False else []
if history:
history.insert(0, r)
r = history.pop()
r.history = history
if not stream:
r.content
resp = r
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
|
BruteSploit
|
positive
|
def train(self, data):
"""Training the model.
Args:
data: the Dataset object.
Returns:
dict: {}
"""
(self.gpu_id, self.config['device_str']) = self.get_device()
self.config['model']['n_users'] = data.n_users
self.config['model']['n_items'] = data.n_items
self.monitor = Monitor(log_dir=self.config['system']['run_dir'], delay=1, gpu_id=self.gpu_id)
<DeepExtract>
self.config['model']['n_users'] = data.n_users
self.config['model']['n_items'] = data.n_items
self.engine = UserKNNEngine(self.config)
</DeepExtract>
print(type(data.train))
print(data.train.head())
self.engine.model.prepare_model(data)
self.model_save_dir = os.path.join(self.config['system']['model_save_dir'], self.config['model']['save_name'])
self.config['run_time'] = self.monitor.stop()
return 'data loaded'
|
def train(self, data):
"""Training the model.
Args:
data: the Dataset object.
Returns:
dict: {}
"""
(self.gpu_id, self.config['device_str']) = self.get_device()
self.config['model']['n_users'] = data.n_users
self.config['model']['n_items'] = data.n_items
self.monitor = Monitor(log_dir=self.config['system']['run_dir'], delay=1, gpu_id=self.gpu_id)
self.config['model']['n_users'] = data.n_users
self.config['model']['n_items'] = data.n_items
self.engine = UserKNNEngine(self.config)
print(type(data.train))
print(data.train.head())
self.engine.model.prepare_model(data)
self.model_save_dir = os.path.join(self.config['system']['model_save_dir'], self.config['model']['save_name'])
self.config['run_time'] = self.monitor.stop()
return 'data loaded'
|
beta-recsys
|
positive
|
def pre2post_mean(pre_values, post_num, post_ids, pre_ids=None):
"""The pre-to-post synaptic mean computation.
Parameters
----------
pre_values: float, ArrayType
The pre-synaptic values.
pre_ids: ArrayType
The connected pre-synaptic neuron ids.
post_ids: ArrayType
The connected post-synaptic neuron ids.
post_num: int
Output dimension. The number of post-synaptic neurons.
Returns
-------
post_val: ArrayType
The value with the size of post-synaptic neurons.
"""
out = jnp.zeros(post_num)
pre_values = as_jax(pre_values)
post_ids = as_jax(post_ids)
if jnp.ndim(pre_values) == 0:
return out.at[post_ids].set(pre_values)
else:
<DeepExtract>
if pre_ids is None:
raise MathError(f'pre2post synaptic computation needs "pre_ids" when providing heterogeneous "pre_values" (brainpy.math.ndim(pre_values) != 0).')
</DeepExtract>
pre_ids = as_jax(pre_ids)
<DeepExtract>
pre_values = as_jax(pre_values)
pre_ids = as_jax(pre_ids)
if jnp.ndim(pre_values) == 0:
pre_values = jnp.ones(len(pre_ids), dtype=pre_values.dtype) * pre_values
else:
pre_values = _pre2syn(pre_ids, pre_values)
</DeepExtract>
return syn2post_mean(pre_values, post_ids, post_num)
|
def pre2post_mean(pre_values, post_num, post_ids, pre_ids=None):
"""The pre-to-post synaptic mean computation.
Parameters
----------
pre_values: float, ArrayType
The pre-synaptic values.
pre_ids: ArrayType
The connected pre-synaptic neuron ids.
post_ids: ArrayType
The connected post-synaptic neuron ids.
post_num: int
Output dimension. The number of post-synaptic neurons.
Returns
-------
post_val: ArrayType
The value with the size of post-synaptic neurons.
"""
out = jnp.zeros(post_num)
pre_values = as_jax(pre_values)
post_ids = as_jax(post_ids)
if jnp.ndim(pre_values) == 0:
return out.at[post_ids].set(pre_values)
else:
if pre_ids is None:
raise MathError(f'pre2post synaptic computation needs "pre_ids" when providing heterogeneous "pre_values" (brainpy.math.ndim(pre_values) != 0).')
pre_ids = as_jax(pre_ids)
pre_values = as_jax(pre_values)
pre_ids = as_jax(pre_ids)
if jnp.ndim(pre_values) == 0:
pre_values = jnp.ones(len(pre_ids), dtype=pre_values.dtype) * pre_values
else:
pre_values = _pre2syn(pre_ids, pre_values)
return syn2post_mean(pre_values, post_ids, post_num)
|
BrainPy
|
positive
|
def _export_library_visual_scenes(parent_element):
current_element = self._doc.createElement('library_visual_scenes')
visual_scene = self._doc.createElement('visual_scene')
visual_scene.setAttribute('id', 'scene')
visual_scene.setAttribute('name', 'scene')
current_element.appendChild(visual_scene)
parent_element.appendChild(current_element)
if utils.get_mesh_export_nodes(self._config.export_selected_nodes):
if utils.are_duplicate_nodes():
message = 'Duplicate Node Names'
bpy.ops.screen.display_error('INVOKE_DEFAULT', message=message)
for group in utils.get_mesh_export_nodes(self._config.export_selected_nodes):
<DeepExtract>
if not self._config.export_for_lumberyard:
node_name = 'CryExportNode_{}'.format(utils.get_node_name(group))
node = self._doc.createElement('node')
node.setAttribute('id', node_name)
node.setIdAttribute('id')
else:
node_name = '{}'.format(utils.get_node_name(group))
node = self._doc.createElement('node')
node.setAttribute('id', node_name)
node.setAttribute('LumberyardExportNode', '1')
node.setIdAttribute('id')
root_objects = []
for object_ in group.objects:
if utils.is_visual_scene_node_writed(object_, group):
root_objects.append(object_)
node = self._write_visual_scene_node(root_objects, node, group)
extra = self._create_cryengine_extra(group)
node.appendChild(extra)
visual_scene.appendChild(node)
</DeepExtract>
else:
pass
|
def _export_library_visual_scenes(parent_element):
current_element = self._doc.createElement('library_visual_scenes')
visual_scene = self._doc.createElement('visual_scene')
visual_scene.setAttribute('id', 'scene')
visual_scene.setAttribute('name', 'scene')
current_element.appendChild(visual_scene)
parent_element.appendChild(current_element)
if utils.get_mesh_export_nodes(self._config.export_selected_nodes):
if utils.are_duplicate_nodes():
message = 'Duplicate Node Names'
bpy.ops.screen.display_error('INVOKE_DEFAULT', message=message)
for group in utils.get_mesh_export_nodes(self._config.export_selected_nodes):
if not self._config.export_for_lumberyard:
node_name = 'CryExportNode_{}'.format(utils.get_node_name(group))
node = self._doc.createElement('node')
node.setAttribute('id', node_name)
node.setIdAttribute('id')
else:
node_name = '{}'.format(utils.get_node_name(group))
node = self._doc.createElement('node')
node.setAttribute('id', node_name)
node.setAttribute('LumberyardExportNode', '1')
node.setIdAttribute('id')
root_objects = []
for object_ in group.objects:
if utils.is_visual_scene_node_writed(object_, group):
root_objects.append(object_)
node = self._write_visual_scene_node(root_objects, node, group)
extra = self._create_cryengine_extra(group)
node.appendChild(extra)
visual_scene.appendChild(node)
else:
pass
|
BCRYExporter
|
positive
|