| before (string, lengths 0-955k) | after (string, lengths 0-877k) | repo (string, lengths 1-74) | type (string, 1 class) |
|---|---|---|---|
def get_at(self, element_path: ElementPath=None, check_parent: bool=True) -> Union[OscalBaseModel, List[OscalBaseModel]]:
"""Get the element at the specified element path.
it will return the sub-model object at the path. Sub-model object
can be of type OscalBaseModel or List
"""
if element_path is None:
return self._elem
<DeepExtract>
path_parts = element_path.get()
root_model = path_parts[0]
path_parts = path_parts[1:]
(_, path_parts) = (root_model, path_parts)
</DeepExtract>
elm = self._elem
if hasattr(elm, '__root__') and (isinstance(elm.__root__, dict) or isinstance(elm.__root__, list)):
elm = elm.__root__
if check_parent and element_path.get_parent() is not None and (element_path.get_parent().get_last() != ElementPath.WILDCARD):
<DeepExtract>
if element_path.get_parent() is None:
elm_at = self._elem
(_, path_parts) = self._split_element_path(element_path.get_parent())
elm = self._elem
if hasattr(elm, '__root__') and (isinstance(elm.__root__, dict) or isinstance(elm.__root__, list)):
elm = elm.__root__
if check_parent and element_path.get_parent().get_parent() is not None and (element_path.get_parent().get_parent().get_last() != ElementPath.WILDCARD):
elm_at = self.get_at(element_path.get_parent().get_parent())
if elm_at is None:
raise TrestleNotFoundError(f'Invalid parent path {element_path.get_parent().get_parent()}')
elm = elm_at
for attr in path_parts:
if elm is None:
break
if attr == ElementPath.WILDCARD:
break
elif attr.isnumeric():
if isinstance(elm, list):
elm = elm[int(attr)]
else:
elm_at = None
else:
elm = elm.get_field_value_by_alias(attr)
elm_at = elm
</DeepExtract>
if elm_at is None:
raise TrestleNotFoundError(f'Invalid parent path {element_path.get_parent()}')
elm = elm_at
for attr in path_parts:
if elm is None:
break
if attr == ElementPath.WILDCARD:
break
elif attr.isnumeric():
if isinstance(elm, list):
elm = elm[int(attr)]
else:
return None
else:
elm = elm.get_field_value_by_alias(attr)
return elm
|
def get_at(self, element_path: ElementPath=None, check_parent: bool=True) -> Union[OscalBaseModel, List[OscalBaseModel]]:
"""Get the element at the specified element path.
it will return the sub-model object at the path. Sub-model object
can be of type OscalBaseModel or List
"""
if element_path is None:
return self._elem
path_parts = element_path.get()
root_model = path_parts[0]
path_parts = path_parts[1:]
(_, path_parts) = (root_model, path_parts)
elm = self._elem
if hasattr(elm, '__root__') and (isinstance(elm.__root__, dict) or isinstance(elm.__root__, list)):
elm = elm.__root__
if check_parent and element_path.get_parent() is not None and (element_path.get_parent().get_last() != ElementPath.WILDCARD):
if element_path.get_parent() is None:
elm_at = self._elem
(_, path_parts) = self._split_element_path(element_path.get_parent())
elm = self._elem
if hasattr(elm, '__root__') and (isinstance(elm.__root__, dict) or isinstance(elm.__root__, list)):
elm = elm.__root__
if check_parent and element_path.get_parent().get_parent() is not None and (element_path.get_parent().get_parent().get_last() != ElementPath.WILDCARD):
elm_at = self.get_at(element_path.get_parent().get_parent())
if elm_at is None:
raise TrestleNotFoundError(f'Invalid parent path {element_path.get_parent().get_parent()}')
elm = elm_at
for attr in path_parts:
if elm is None:
break
if attr == ElementPath.WILDCARD:
break
elif attr.isnumeric():
if isinstance(elm, list):
elm = elm[int(attr)]
else:
elm_at = None
else:
elm = elm.get_field_value_by_alias(attr)
elm_at = elm
if elm_at is None:
raise TrestleNotFoundError(f'Invalid parent path {element_path.get_parent()}')
elm = elm_at
for attr in path_parts:
if elm is None:
break
if attr == ElementPath.WILDCARD:
break
elif attr.isnumeric():
if isinstance(elm, list):
elm = elm[int(attr)]
else:
return None
else:
elm = elm.get_field_value_by_alias(attr)
return elm
|
compliance-trestle
|
positive
|
def _paraminsert(cur):
<DeepExtract>
cur.execute(xddl1)
cur.execute(ddl1)
</DeepExtract>
cur.execute("insert into %sbooze values ('Victoria Bitter')" % table_prefix)
assert cur.rowcount in (-1, 1)
if driver.paramstyle == 'qmark':
cur.execute('insert into %sbooze values (?)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'numeric':
cur.execute('insert into %sbooze values (:1)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'named':
cur.execute('insert into %sbooze values (:beer)' % table_prefix, {'beer': "Cooper's"})
elif driver.paramstyle == 'format':
cur.execute('insert into %sbooze values (%%s)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'pyformat':
cur.execute('insert into %sbooze values (%%(beer)s)' % table_prefix, {'beer': "Cooper's"})
else:
assert False, 'Invalid paramstyle'
assert cur.rowcount in (-1, 1)
cur.execute('select name from %sbooze' % table_prefix)
res: typing.Tuple = cur.fetchall()
assert len(res) == 2, 'cursor.fetchall returned too few rows'
beers: typing.List = [res[0][0], res[1][0]]
beers.sort()
assert beers[0] == "Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted incorrectly'
assert beers[1] == 'Victoria Bitter', 'cursor.fetchall retrieved incorrect data, or data inserted incorrectly'
|
def _paraminsert(cur):
cur.execute(xddl1)
cur.execute(ddl1)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % table_prefix)
assert cur.rowcount in (-1, 1)
if driver.paramstyle == 'qmark':
cur.execute('insert into %sbooze values (?)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'numeric':
cur.execute('insert into %sbooze values (:1)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'named':
cur.execute('insert into %sbooze values (:beer)' % table_prefix, {'beer': "Cooper's"})
elif driver.paramstyle == 'format':
cur.execute('insert into %sbooze values (%%s)' % table_prefix, ("Cooper's",))
elif driver.paramstyle == 'pyformat':
cur.execute('insert into %sbooze values (%%(beer)s)' % table_prefix, {'beer': "Cooper's"})
else:
assert False, 'Invalid paramstyle'
assert cur.rowcount in (-1, 1)
cur.execute('select name from %sbooze' % table_prefix)
res: typing.Tuple = cur.fetchall()
assert len(res) == 2, 'cursor.fetchall returned too few rows'
beers: typing.List = [res[0][0], res[1][0]]
beers.sort()
assert beers[0] == "Cooper's", 'cursor.fetchall retrieved incorrect data, or data inserted incorrectly'
assert beers[1] == 'Victoria Bitter', 'cursor.fetchall retrieved incorrect data, or data inserted incorrectly'
|
amazon-redshift-python-driver
|
positive
|
def CreateAndPlaceObjectOnFloorAtLocation(self, action):
object_mask = action['object_mask']
object_type = action['objectType']
force_action = False if 'forceAction' not in action else action['forceAction']
rot = 90 * (round(action['rotation']['y'] / 90) % 4)
mask = np.rot90(object_mask, k=-(rot // 90))
<DeepExtract>
(x, z) = (action['x'], action['z'])
row = round((self._max_z - z) / self.grid_size)
col = round((x - self._min_x) / self.grid_size)
(row, col) = (int(row), int(col))
</DeepExtract>
(row_rad, col_rad) = (mask.shape[0] // 2, mask.shape[1] // 2)
reachable_subset = self.agent_reachable_positions_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1]
if force_action or (np.logical_and(reachable_subset, mask) == mask).all():
if not force_action and np.any([self.agent_inside_range(agent_id, row - row_rad, row + row_rad, col - col_rad, col + col_rad) for agent_id in range(len(self.agents))]):
return (False, None)
self.agent_reachable_positions_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1] &= np.logical_not(mask)
<DeepExtract>
(row, col) = (row, col)
x = col * self.grid_size + self._min_x
z = -row * self.grid_size + self._max_z
xz = (x, z)
</DeepExtract>
object_id = object_type + '|{}'.format(len(self.tracked_objects) + 1)
floor_object = TrackedObject(self, object_id, object_type)
floor_object.row = row
floor_object.col = col
floor_object.rot = rot
floor_object.object_mask = mask
self.tracked_objects[object_id] = floor_object
return (True, {'position': {'x': xz[0], 'y': math.nan, 'z': xz[1]}, 'row': floor_object.row, 'col': floor_object.col, 'rotation': floor_object.rot, 'objectId': object_id})
return (False, None)
|
def CreateAndPlaceObjectOnFloorAtLocation(self, action):
object_mask = action['object_mask']
object_type = action['objectType']
force_action = False if 'forceAction' not in action else action['forceAction']
rot = 90 * (round(action['rotation']['y'] / 90) % 4)
mask = np.rot90(object_mask, k=-(rot // 90))
(x, z) = (action['x'], action['z'])
row = round((self._max_z - z) / self.grid_size)
col = round((x - self._min_x) / self.grid_size)
(row, col) = (int(row), int(col))
(row_rad, col_rad) = (mask.shape[0] // 2, mask.shape[1] // 2)
reachable_subset = self.agent_reachable_positions_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1]
if force_action or (np.logical_and(reachable_subset, mask) == mask).all():
if not force_action and np.any([self.agent_inside_range(agent_id, row - row_rad, row + row_rad, col - col_rad, col + col_rad) for agent_id in range(len(self.agents))]):
return (False, None)
self.agent_reachable_positions_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1] &= np.logical_not(mask)
(row, col) = (row, col)
x = col * self.grid_size + self._min_x
z = -row * self.grid_size + self._max_z
xz = (x, z)
object_id = object_type + '|{}'.format(len(self.tracked_objects) + 1)
floor_object = TrackedObject(self, object_id, object_type)
floor_object.row = row
floor_object.col = col
floor_object.rot = rot
floor_object.object_mask = mask
self.tracked_objects[object_id] = floor_object
return (True, {'position': {'x': xz[0], 'y': math.nan, 'z': xz[1]}, 'row': floor_object.row, 'col': floor_object.col, 'rotation': floor_object.rot, 'objectId': object_id})
return (False, None)
|
cordial-sync
|
positive
|
def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
for _ in range(self.agent_params['num_critic_updates_per_agent_update']):
cr_loss = self.critic.update(ob_no, next_ob_no, re_n, terminal_n)
<DeepExtract>
v_s = self.sess.run([self.critic.critic_prediction], feed_dict={self.critic.sy_ob_no: ob_no})[0]
v_sprime = self.sess.run([self.critic.critic_prediction], feed_dict={self.critic.sy_ob_no: next_ob_no})[0]
adv_n = re_n + self.gamma * v_sprime * (1 - terminal_n) - v_s
if self.standardize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-08)
advantage = adv_n
</DeepExtract>
for _ in range(self.agent_params['num_actor_updates_per_agent_update']):
ac_loss = self.actor.update(ob_no, ac_na, advantage)
loss = OrderedDict()
loss['Critic_Loss'] = cr_loss
loss['Actor_Loss'] = ac_loss
return loss
|
def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
for _ in range(self.agent_params['num_critic_updates_per_agent_update']):
cr_loss = self.critic.update(ob_no, next_ob_no, re_n, terminal_n)
v_s = self.sess.run([self.critic.critic_prediction], feed_dict={self.critic.sy_ob_no: ob_no})[0]
v_sprime = self.sess.run([self.critic.critic_prediction], feed_dict={self.critic.sy_ob_no: next_ob_no})[0]
adv_n = re_n + self.gamma * v_sprime * (1 - terminal_n) - v_s
if self.standardize_advantages:
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-08)
advantage = adv_n
for _ in range(self.agent_params['num_actor_updates_per_agent_update']):
ac_loss = self.actor.update(ob_no, ac_na, advantage)
loss = OrderedDict()
loss['Critic_Loss'] = cr_loss
loss['Actor_Loss'] = ac_loss
return loss
|
CS285_Fa19_Deep_Reinforcement_Learning
|
positive
|
def test_mastodon(self):
<DeepExtract>
trigger = self.create_triggerservice(consumer_name='ServiceMastodon')
ServicesActivated.objects.get(name='ServiceMastodon')
resu = Mastodon.objects.create(tooter=tooter, timeline=timeline, tag=tag, fav=fav, since_id=since_id, max_id=max_id, trigger=trigger, status=True)
m = resu
</DeepExtract>
self.assertTrue(isinstance(m, Mastodon))
self.assertEqual(m.show(), 'My Mastodon %s %s' % (m.timeline, m.trigger))
self.assertEqual(m.__str__(), '{}'.format(m.timeline))
|
def test_mastodon(self):
trigger = self.create_triggerservice(consumer_name='ServiceMastodon')
ServicesActivated.objects.get(name='ServiceMastodon')
resu = Mastodon.objects.create(tooter=tooter, timeline=timeline, tag=tag, fav=fav, since_id=since_id, max_id=max_id, trigger=trigger, status=True)
m = resu
self.assertTrue(isinstance(m, Mastodon))
self.assertEqual(m.show(), 'My Mastodon %s %s' % (m.timeline, m.trigger))
self.assertEqual(m.__str__(), '{}'.format(m.timeline))
|
django-th
|
positive
|
def __init__(self, do_device_discovery=True):
self._input_device = None
self._mux = [NoMux(self), TakeOverSelectiveMux(self), TakeOverMux(self)]
self._selected_mux = self._mux[0]
self.min_thrust = 0
self.max_thrust = 0
self._thrust_slew_rate = 0
self.thrust_slew_enabled = False
self.thrust_slew_limit = 0
self.has_pressure_sensor = False
self._hover_max_height = MAX_TARGET_HEIGHT
self.max_rp_angle = 0
self.max_yaw_rate = 0
try:
<DeepExtract>
self._assisted_control = Config().get('assistedControl')
</DeepExtract>
except KeyError:
<DeepExtract>
self._assisted_control = JoystickReader.ASSISTED_CONTROL_ALTHOLD
</DeepExtract>
self._old_thrust = 0
self._old_raw_thrust = 0
self.springy_throttle = True
self._target_height = INITAL_TAGET_HEIGHT
self.trim_roll = Config().get('trim_roll')
self.trim_pitch = Config().get('trim_pitch')
self._rp_dead_band = 0.1
self._input_map = None
if Config().get('flightmode') == 'Normal':
self.max_yaw_rate = Config().get('normal_max_yaw')
self.max_rp_angle = Config().get('normal_max_rp')
self.min_thrust = Config().get('normal_min_thrust')
self.max_thrust = Config().get('normal_max_thrust')
self.thrust_slew_limit = Config().get('normal_slew_limit')
self.thrust_slew_rate = Config().get('normal_slew_rate')
else:
self.max_yaw_rate = Config().get('max_yaw')
self.max_rp_angle = Config().get('max_rp')
self.min_thrust = Config().get('min_thrust')
self.max_thrust = Config().get('max_thrust')
self.thrust_slew_limit = Config().get('slew_limit')
self.thrust_slew_rate = Config().get('slew_rate')
self._dev_blacklist = None
if len(Config().get('input_device_blacklist')) > 0:
self._dev_blacklist = re.compile(Config().get('input_device_blacklist'))
logger.info('Using device blacklist [{}]'.format(Config().get('input_device_blacklist')))
self._available_devices = {}
self._read_timer = PeriodicTimer(INPUT_READ_PERIOD, self.read_input)
if do_device_discovery:
self._discovery_timer = PeriodicTimer(1.0, self._do_device_discovery)
self._discovery_timer.start()
if not os.path.exists(ConfigManager().configs_dir):
logger.info('No user config found, copying dist files')
os.makedirs(ConfigManager().configs_dir)
for f in glob.glob(cfclient.module_path + '/configs/input/[A-Za-z]*.json'):
dest = os.path.join(ConfigManager().configs_dir, os.path.basename(f))
if not os.path.isfile(dest):
logger.debug('Copying %s', f)
shutil.copy2(f, ConfigManager().configs_dir)
ConfigManager().get_list_of_configs()
self.input_updated = Caller()
self.assisted_input_updated = Caller()
self.heighthold_input_updated = Caller()
self.hover_input_updated = Caller()
self.rp_trim_updated = Caller()
self.emergency_stop_updated = Caller()
self.device_discovery = Caller()
self.device_error = Caller()
self.assisted_control_updated = Caller()
self.alt1_updated = Caller()
self.alt2_updated = Caller()
self.limiting_updated = Caller()
|
def __init__(self, do_device_discovery=True):
self._input_device = None
self._mux = [NoMux(self), TakeOverSelectiveMux(self), TakeOverMux(self)]
self._selected_mux = self._mux[0]
self.min_thrust = 0
self.max_thrust = 0
self._thrust_slew_rate = 0
self.thrust_slew_enabled = False
self.thrust_slew_limit = 0
self.has_pressure_sensor = False
self._hover_max_height = MAX_TARGET_HEIGHT
self.max_rp_angle = 0
self.max_yaw_rate = 0
try:
self._assisted_control = Config().get('assistedControl')
except KeyError:
self._assisted_control = JoystickReader.ASSISTED_CONTROL_ALTHOLD
self._old_thrust = 0
self._old_raw_thrust = 0
self.springy_throttle = True
self._target_height = INITAL_TAGET_HEIGHT
self.trim_roll = Config().get('trim_roll')
self.trim_pitch = Config().get('trim_pitch')
self._rp_dead_band = 0.1
self._input_map = None
if Config().get('flightmode') == 'Normal':
self.max_yaw_rate = Config().get('normal_max_yaw')
self.max_rp_angle = Config().get('normal_max_rp')
self.min_thrust = Config().get('normal_min_thrust')
self.max_thrust = Config().get('normal_max_thrust')
self.thrust_slew_limit = Config().get('normal_slew_limit')
self.thrust_slew_rate = Config().get('normal_slew_rate')
else:
self.max_yaw_rate = Config().get('max_yaw')
self.max_rp_angle = Config().get('max_rp')
self.min_thrust = Config().get('min_thrust')
self.max_thrust = Config().get('max_thrust')
self.thrust_slew_limit = Config().get('slew_limit')
self.thrust_slew_rate = Config().get('slew_rate')
self._dev_blacklist = None
if len(Config().get('input_device_blacklist')) > 0:
self._dev_blacklist = re.compile(Config().get('input_device_blacklist'))
logger.info('Using device blacklist [{}]'.format(Config().get('input_device_blacklist')))
self._available_devices = {}
self._read_timer = PeriodicTimer(INPUT_READ_PERIOD, self.read_input)
if do_device_discovery:
self._discovery_timer = PeriodicTimer(1.0, self._do_device_discovery)
self._discovery_timer.start()
if not os.path.exists(ConfigManager().configs_dir):
logger.info('No user config found, copying dist files')
os.makedirs(ConfigManager().configs_dir)
for f in glob.glob(cfclient.module_path + '/configs/input/[A-Za-z]*.json'):
dest = os.path.join(ConfigManager().configs_dir, os.path.basename(f))
if not os.path.isfile(dest):
logger.debug('Copying %s', f)
shutil.copy2(f, ConfigManager().configs_dir)
ConfigManager().get_list_of_configs()
self.input_updated = Caller()
self.assisted_input_updated = Caller()
self.heighthold_input_updated = Caller()
self.hover_input_updated = Caller()
self.rp_trim_updated = Caller()
self.emergency_stop_updated = Caller()
self.device_discovery = Caller()
self.device_error = Caller()
self.assisted_control_updated = Caller()
self.alt1_updated = Caller()
self.alt2_updated = Caller()
self.limiting_updated = Caller()
|
crazyflie-clients-python
|
positive
|
def gen_postinit(self, cls: ClassDefinition, slot: SlotDefinition) -> Optional[str]:
""" Generate python post init rules for slot in class
"""
rlines: List[str] = []
aliased_slot_name = self.slot_name(slot.name)
<DeepExtract>
rangelist = self.class_identifier_path(cls, False) if slot.key or slot.identifier else self.slot_range_path(slot)
prox_type = self.slot_range_path(slot)[-1].rsplit('.')[-1]
prox_type_name = rangelist[-1]
if slot.range in self.schema.enums or (cls and slot.inlined and (slot.range in self.schema.classes) and self.forward_reference(slot.range, cls.name)):
rangelist[-1] = f'"{rangelist[-1]}"'
(range_type, base_type, base_type_name) = (str(self.gen_class_reference(rangelist)), prox_type, prox_type_name)
</DeepExtract>
slot_identifier = self.class_identifier(slot.range)
if slot.required:
rlines.append(f'if self.{aliased_slot_name} is None:')
rlines.append(f'\traise ValueError("{aliased_slot_name} must be supplied")')
if slot.multivalued:
if slot.inlined and slot_identifier:
rlines.append(f'elif not isinstance(self.{aliased_slot_name}, (list, dict)):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
rlines.append(f'if len(self.{aliased_slot_name}) == 0:')
rlines.append(f'\traise ValueError(f"{aliased_slot_name} must be a non-empty list, dictionary, or class")')
else:
rlines.append(f'elif not isinstance(self.{aliased_slot_name}, list):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
rlines.append(f'elif len(self.{aliased_slot_name}) == 0:')
rlines.append(f'\traise ValueError(f"{aliased_slot_name} must be a non-empty list")')
elif slot.multivalued:
rlines.append(f'if self.{aliased_slot_name} is None:')
rlines.append(f'\tself.{aliased_slot_name} = []')
if slot.inlined and slot_identifier:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, (list, dict)):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
else:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, list):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
indent = len(f'self.{aliased_slot_name} = [') * ' '
if not slot.multivalued:
if slot.required:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, {base_type_name}):')
else:
rlines.append(f'if self.{aliased_slot_name} is not None and not isinstance(self.{aliased_slot_name}, {base_type_name}):')
if slot.range in self.schema.classes and (not self.schema.classes[slot.range].slots):
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}()')
elif self.class_identifier(slot.range) or slot.range in self.schema.types or slot.range in self.schema.enums:
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}(self.{aliased_slot_name})')
else:
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}(**self.{aliased_slot_name})')
elif slot.inlined:
slot_range_cls = self.schema.classes[slot.range]
identifier = self.class_identifier(slot_range_cls)
if not identifier:
for range_slot_name in slot_range_cls.slots:
range_slot = self.schema.slots[range_slot_name]
if range_slot.required:
inlined_as_list = True
keyed = False
identifier = range_slot.name
break
else:
inlined_as_list = slot.inlined_as_list
keyed = True
if identifier:
rlines.append(f'self._normalize_inlined_slot(slot_name="{aliased_slot_name}", slot_type={base_type_name}, key_name="{self.aliased_slot_name(identifier)}", inlined_as_list={inlined_as_list}, keyed={keyed})')
else:
sn = f'self.{aliased_slot_name}'
rlines.append(f'{sn} = [v if isinstance(v, {base_type_name}) else {base_type_name}(**v) for v in {sn}]')
else:
rlines.append(f'self.{aliased_slot_name} = [v if isinstance(v, {base_type_name}) else {base_type_name}(v) for v in self.{aliased_slot_name}]')
if rlines:
rlines.append('')
return '\n\t\t'.join(rlines)
|
def gen_postinit(self, cls: ClassDefinition, slot: SlotDefinition) -> Optional[str]:
""" Generate python post init rules for slot in class
"""
rlines: List[str] = []
aliased_slot_name = self.slot_name(slot.name)
rangelist = self.class_identifier_path(cls, False) if slot.key or slot.identifier else self.slot_range_path(slot)
prox_type = self.slot_range_path(slot)[-1].rsplit('.')[-1]
prox_type_name = rangelist[-1]
if slot.range in self.schema.enums or (cls and slot.inlined and (slot.range in self.schema.classes) and self.forward_reference(slot.range, cls.name)):
rangelist[-1] = f'"{rangelist[-1]}"'
(range_type, base_type, base_type_name) = (str(self.gen_class_reference(rangelist)), prox_type, prox_type_name)
slot_identifier = self.class_identifier(slot.range)
if slot.required:
rlines.append(f'if self.{aliased_slot_name} is None:')
rlines.append(f'\traise ValueError("{aliased_slot_name} must be supplied")')
if slot.multivalued:
if slot.inlined and slot_identifier:
rlines.append(f'elif not isinstance(self.{aliased_slot_name}, (list, dict)):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
rlines.append(f'if len(self.{aliased_slot_name}) == 0:')
rlines.append(f'\traise ValueError(f"{aliased_slot_name} must be a non-empty list, dictionary, or class")')
else:
rlines.append(f'elif not isinstance(self.{aliased_slot_name}, list):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
rlines.append(f'elif len(self.{aliased_slot_name}) == 0:')
rlines.append(f'\traise ValueError(f"{aliased_slot_name} must be a non-empty list")')
elif slot.multivalued:
rlines.append(f'if self.{aliased_slot_name} is None:')
rlines.append(f'\tself.{aliased_slot_name} = []')
if slot.inlined and slot_identifier:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, (list, dict)):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
else:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, list):')
rlines.append(f'\tself.{aliased_slot_name} = [self.{aliased_slot_name}]')
indent = len(f'self.{aliased_slot_name} = [') * ' '
if not slot.multivalued:
if slot.required:
rlines.append(f'if not isinstance(self.{aliased_slot_name}, {base_type_name}):')
else:
rlines.append(f'if self.{aliased_slot_name} is not None and not isinstance(self.{aliased_slot_name}, {base_type_name}):')
if slot.range in self.schema.classes and (not self.schema.classes[slot.range].slots):
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}()')
elif self.class_identifier(slot.range) or slot.range in self.schema.types or slot.range in self.schema.enums:
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}(self.{aliased_slot_name})')
else:
rlines.append(f'\tself.{aliased_slot_name} = {base_type_name}(**self.{aliased_slot_name})')
elif slot.inlined:
slot_range_cls = self.schema.classes[slot.range]
identifier = self.class_identifier(slot_range_cls)
if not identifier:
for range_slot_name in slot_range_cls.slots:
range_slot = self.schema.slots[range_slot_name]
if range_slot.required:
inlined_as_list = True
keyed = False
identifier = range_slot.name
break
else:
inlined_as_list = slot.inlined_as_list
keyed = True
if identifier:
rlines.append(f'self._normalize_inlined_slot(slot_name="{aliased_slot_name}", slot_type={base_type_name}, key_name="{self.aliased_slot_name(identifier)}", inlined_as_list={inlined_as_list}, keyed={keyed})')
else:
sn = f'self.{aliased_slot_name}'
rlines.append(f'{sn} = [v if isinstance(v, {base_type_name}) else {base_type_name}(**v) for v in {sn}]')
else:
rlines.append(f'self.{aliased_slot_name} = [v if isinstance(v, {base_type_name}) else {base_type_name}(v) for v in self.{aliased_slot_name}]')
if rlines:
rlines.append('')
return '\n\t\t'.join(rlines)
|
biolinkml
|
positive
|
def print_tree(spacer, root, dir):
spacer = spacer + ' '
if root == None:
return
<DeepExtract>
spacer = spacer + ' '
if root.right == None:
return
print_tree(spacer, root.right.right, 'right')
if 'right' == 'right':
print(spacer, '/ ', root.right.value)
if 'right' == 'center':
print(spacer + ' ', root.right.value)
if 'right' == 'left':
print(spacer, '\\ ', root.right.value)
print_tree(spacer, root.right.left, 'left')
</DeepExtract>
if dir == 'right':
print(spacer, '/ ', root.value)
if dir == 'center':
print(spacer + ' ', root.value)
if dir == 'left':
print(spacer, '\\ ', root.value)
<DeepExtract>
spacer = spacer + ' '
if root.left == None:
return
print_tree(spacer, root.left.right, 'right')
if 'left' == 'right':
print(spacer, '/ ', root.left.value)
if 'left' == 'center':
print(spacer + ' ', root.left.value)
if 'left' == 'left':
print(spacer, '\\ ', root.left.value)
print_tree(spacer, root.left.left, 'left')
</DeepExtract>
|
def print_tree(spacer, root, dir):
spacer = spacer + ' '
if root == None:
return
spacer = spacer + ' '
if root.right == None:
return
print_tree(spacer, root.right.right, 'right')
if 'right' == 'right':
print(spacer, '/ ', root.right.value)
if 'right' == 'center':
print(spacer + ' ', root.right.value)
if 'right' == 'left':
print(spacer, '\\ ', root.right.value)
print_tree(spacer, root.right.left, 'left')
if dir == 'right':
print(spacer, '/ ', root.value)
if dir == 'center':
print(spacer + ' ', root.value)
if dir == 'left':
print(spacer, '\\ ', root.value)
spacer = spacer + ' '
if root.left == None:
return
print_tree(spacer, root.left.right, 'right')
if 'left' == 'right':
print(spacer, '/ ', root.left.value)
if 'left' == 'center':
print(spacer + ' ', root.left.value)
if 'left' == 'left':
print(spacer, '\\ ', root.left.value)
print_tree(spacer, root.left.left, 'left')
|
2017Challenges
|
positive
|
def __init__(self, options=None):
<DeepExtract>
self.__pos = position_marker()
self.__saved_pos = []
self.__bom = helpers.make_raw_bytes([])
self.__codec = None
self.__encoding = None
self.__input_is_bytes = False
self.__rawbuf = None
self.__raw_bytes = None
self.__cmax = 0
self.num_ws_skipped = 0
</DeepExtract>
self.options = options
|
def __init__(self, options=None):
self.__pos = position_marker()
self.__saved_pos = []
self.__bom = helpers.make_raw_bytes([])
self.__codec = None
self.__encoding = None
self.__input_is_bytes = False
self.__rawbuf = None
self.__raw_bytes = None
self.__cmax = 0
self.num_ws_skipped = 0
self.options = options
|
akshare
|
positive
|
def on_opt_change(self, config):
if 'taglist' in config and ('border' in config['taglist'] or 'wrap' in config['taglist']):
<DeepExtract>
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
</DeepExtract>
if 'color' in config or 'style' in config:
<DeepExtract>
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
</DeepExtract>
if 'story' not in config:
return
if 'format_attrs' in config['story']:
needed_attrs = []
for attr in config['story']['format_attrs']:
if attr not in self.content:
needed_attrs.append(attr)
if needed_attrs:
log.debug('%s needs: %s', self.id, needed_attrs)
tag_updater.need_attributes(self.id, needed_attrs)
<DeepExtract>
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
</DeepExtract>
|
def on_opt_change(self, config):
if 'taglist' in config and ('border' in config['taglist'] or 'wrap' in config['taglist']):
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
if 'color' in config or 'style' in config:
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
if 'story' not in config:
return
if 'format_attrs' in config['story']:
needed_attrs = []
for attr in config['story']['format_attrs']:
if attr not in self.content:
needed_attrs.append(attr)
if needed_attrs:
log.debug('%s needs: %s', self.id, needed_attrs)
tag_updater.need_attributes(self.id, needed_attrs)
self.changed = True
self.callbacks['set_var']('needs_redraw', True)
|
canto-curses
|
positive
|
def test_invalid_enum_field_value():
<DeepExtract>
for (field, value) in {'command.payment.sender.status.status': 'invalid'}.items():
path = field.split('.')
dic = sample_request_json()
for f in path[0:len(path) - 1]:
if f not in dic:
dic[f] = {}
dic = dic[f]
dic[path[len(path) - 1]] = value
request_json = sample_request_json()
</DeepExtract>
<DeepExtract>
with pytest.raises(offchain.FieldError, match="expect one of \\['none', 'needs_kyc_data', 'ready_for_settlement', 'abort', 'soft_match'\\], but got: invalid") as e:
req = offchain.from_json(json.dumps(request_json))
offchain.from_dict(req.command, field_path='command')
assert e.value.code == 'invalid_field_value'
assert e.value.field == 'command.payment.sender.status.status'
</DeepExtract>
|
def test_invalid_enum_field_value():
for (field, value) in {'command.payment.sender.status.status': 'invalid'}.items():
path = field.split('.')
dic = sample_request_json()
for f in path[0:len(path) - 1]:
if f not in dic:
dic[f] = {}
dic = dic[f]
dic[path[len(path) - 1]] = value
request_json = sample_request_json()
with pytest.raises(offchain.FieldError, match="expect one of \\['none', 'needs_kyc_data', 'ready_for_settlement', 'abort', 'soft_match'\\], but got: invalid") as e:
req = offchain.from_json(json.dumps(request_json))
offchain.from_dict(req.command, field_path='command')
assert e.value.code == 'invalid_field_value'
assert e.value.field == 'command.payment.sender.status.status'
|
client-sdk-python
|
positive
|
def combined_roidb(imdb_names, training=True):
"""
Combine multiple roidbs
"""
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
<DeepExtract>
roidb = imdb.roidb
if not imdb.name.startswith('coco'):
sizes = [PIL.Image.open(imdb.image_path_at(i)).size for i in range(imdb.num_images)]
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not imdb.name.startswith('coco'):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
</DeepExtract>
print('done')
return imdb.roidb
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
<DeepExtract>
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
print('done')
roidb = imdb.roidb
</DeepExtract>
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
if training:
<DeepExtract>
print('before filtering, there are %d images...' % len(roidb))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % len(roidb))
roidb = roidb
</DeepExtract>
<DeepExtract>
ratio_large = 2
ratio_small = 0.5
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = width / float(height)
if ratio > ratio_large:
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif ratio < ratio_small:
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
(ratio_list, ratio_index) = (ratio_list[ratio_index], ratio_index)
</DeepExtract>
return (imdb, roidb, ratio_list, ratio_index)
|
def combined_roidb(imdb_names, training=True):
"""
Combine multiple roidbs
"""
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
roidb = imdb.roidb
if not imdb.name.startswith('coco'):
sizes = [PIL.Image.open(imdb.image_path_at(i)).size for i in range(imdb.num_images)]
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not imdb.name.startswith('coco'):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
print('done')
return imdb.roidb
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
print('done')
roidb = imdb.roidb
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
if training:
print('before filtering, there are %d images...' % len(roidb))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % len(roidb))
roidb = roidb
ratio_large = 2
ratio_small = 0.5
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = width / float(height)
if ratio > ratio_large:
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif ratio < ratio_small:
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
(ratio_list, ratio_index) = (ratio_list[ratio_index], ratio_index)
return (imdb, roidb, ratio_list, ratio_index)
|
cascade-rcnn-fpn-faster_rcnn-pytorch1.0
|
positive
|
def update(self, iterable, **kwargs):
"""
This update is a bit different from the usual dict update method. It works recursively and will parse a
variety of Python containers, creating menus as necessary.
Keys of corresponding MenuItems in the Menu dictionary are the title of those MenuItems at the time of parsing.
"""
def parse_menu(iterable, menu, depth):
if isinstance(iterable, MenuItem):
menu.add(iterable)
return
for (n, ele) in enumerate(iterable.iteritems() if isinstance(iterable, Mapping) else iterable):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
<DeepExtract>
if isinstance(ele, MenuItem):
menu.add(ele)
return
for (n, ele) in enumerate(ele.iteritems() if isinstance(ele, Mapping) else ele):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, menu, depth)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menu.add(menuitem)
parse_menu(submenu, menuitem, depth + 1)
else:
menu.add(ele)
</DeepExtract>
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menu.add(menuitem)
<DeepExtract>
if isinstance(submenu, MenuItem):
menuitem.add(submenu)
return
for (n, ele) in enumerate(submenu.iteritems() if isinstance(submenu, Mapping) else submenu):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, menuitem, depth + 1)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth + 1, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menuitem.add(menuitem)
parse_menu(submenu, menuitem, depth + 1 + 1)
else:
menuitem.add(ele)
</DeepExtract>
else:
menu.add(ele)
<DeepExtract>
if isinstance(iterable, MenuItem):
self.add(iterable)
return
for (n, ele) in enumerate(iterable.iteritems() if isinstance(iterable, Mapping) else iterable):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, self, 0)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, 0, len(tuple(ele))))
menuitem = MenuItem(menuitem)
self.add(menuitem)
parse_menu(submenu, menuitem, 0 + 1)
else:
self.add(ele)
</DeepExtract>
<DeepExtract>
if isinstance(kwargs, MenuItem):
self.add(kwargs)
return
for (n, ele) in enumerate(kwargs.iteritems() if isinstance(kwargs, Mapping) else kwargs):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, self, 0)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, 0, len(tuple(ele))))
menuitem = MenuItem(menuitem)
self.add(menuitem)
parse_menu(submenu, menuitem, 0 + 1)
else:
self.add(ele)
</DeepExtract>
|
def update(self, iterable, **kwargs):
"""
This update is a bit different from the usual dict update method. It works recursively and will parse a
variety of Python containers, creating menus as necessary.
Keys of corresponding MenuItems in the Menu dictionary are the title of those MenuItems at the time of parsing.
"""
def parse_menu(iterable, menu, depth):
if isinstance(iterable, MenuItem):
menu.add(iterable)
return
for (n, ele) in enumerate(iterable.iteritems() if isinstance(iterable, Mapping) else iterable):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
if isinstance(ele, MenuItem):
menu.add(ele)
return
for (n, ele) in enumerate(ele.iteritems() if isinstance(ele, Mapping) else ele):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, menu, depth)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menu.add(menuitem)
parse_menu(submenu, menuitem, depth + 1)
else:
menu.add(ele)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menu.add(menuitem)
if isinstance(submenu, MenuItem):
menuitem.add(submenu)
return
for (n, ele) in enumerate(submenu.iteritems() if isinstance(submenu, Mapping) else submenu):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, menuitem, depth + 1)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, depth + 1, len(tuple(ele))))
menuitem = MenuItem(menuitem)
menuitem.add(menuitem)
parse_menu(submenu, menuitem, depth + 1 + 1)
else:
menuitem.add(ele)
else:
menu.add(ele)
if isinstance(iterable, MenuItem):
self.add(iterable)
return
for (n, ele) in enumerate(iterable.iteritems() if isinstance(iterable, Mapping) else iterable):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, self, 0)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, 0, len(tuple(ele))))
menuitem = MenuItem(menuitem)
self.add(menuitem)
parse_menu(submenu, menuitem, 0 + 1)
else:
self.add(ele)
if isinstance(kwargs, MenuItem):
self.add(kwargs)
return
for (n, ele) in enumerate(kwargs.iteritems() if isinstance(kwargs, Mapping) else kwargs):
if not isinstance(ele, MenuItem) and isinstance(ele, Mapping):
parse_menu(ele, self, 0)
elif not isinstance(ele, (basestring, MenuItem)) and isinstance(ele, Iterable):
try:
(menuitem, submenu) = ele
except TypeError:
raise ValueError('menu iterable element #{} at depth {} has length {}; must be a single menu item or a pair consisting of a menu item and its submenu'.format(n, 0, len(tuple(ele))))
menuitem = MenuItem(menuitem)
self.add(menuitem)
parse_menu(submenu, menuitem, 0 + 1)
else:
self.add(ele)
|
ComicStreamer
|
positive
|
def inLine(self, string):
<DeepExtract>
if platform.system() == 'Windows':
csbi = GetConsoleScreenBufferInfo()
line = '\x08' * int(csbi.dwCursorPosition.X)
sys.stdout.write(line)
width = csbi.dwCursorPosition.X
csbi.dwCursorPosition.X = 0
FillConsoleOutputCharacter(STDOUT, ' ', width, csbi.dwCursorPosition)
sys.stdout.write(line)
sys.stdout.flush()
else:
sys.stdout.write('\x1b[1K')
sys.stdout.write('\x1b[0G')
</DeepExtract>
sys.stdout.write(string)
sys.stdout.flush()
self.lastInLine = True
|
def inLine(self, string):
if platform.system() == 'Windows':
csbi = GetConsoleScreenBufferInfo()
line = '\x08' * int(csbi.dwCursorPosition.X)
sys.stdout.write(line)
width = csbi.dwCursorPosition.X
csbi.dwCursorPosition.X = 0
FillConsoleOutputCharacter(STDOUT, ' ', width, csbi.dwCursorPosition)
sys.stdout.write(line)
sys.stdout.flush()
else:
sys.stdout.write('\x1b[1K')
sys.stdout.write('\x1b[0G')
sys.stdout.write(string)
sys.stdout.flush()
self.lastInLine = True
|
BruteSploit
|
positive
|
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
<DeepExtract>
self._fore = self._default & 7
self._back = self._default >> 4 & 7
self._style = self._default & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
</DeepExtract>
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
self._light = 0
|
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self._fore = self._default & 7
self._back = self._default >> 4 & 7
self._style = self._default & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
self._light = 0
|
BruteSploit
|
positive
|
def merge_sort(self, data):
if len(data) > 1:
mid = len(data) // 2
left = data[:mid]
right = data[mid:]
<DeepExtract>
if len(left) > 1:
mid = len(left) // 2
left = left[:mid]
right = left[mid:]
self.merge_sort(left)
self.merge_sort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
left[k] = left[i]
i += 1
else:
left[k] = right[j]
j += 1
k += 1
while i < len(left):
left[k] = left[i]
i += 1
k += 1
while j < len(right):
left[k] = right[j]
j += 1
k += 1
</DeepExtract>
<DeepExtract>
if len(right) > 1:
mid = len(right) // 2
left = right[:mid]
right = right[mid:]
self.merge_sort(left)
self.merge_sort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
right[k] = left[i]
i += 1
else:
right[k] = right[j]
j += 1
k += 1
while i < len(left):
right[k] = left[i]
i += 1
k += 1
while j < len(right):
right[k] = right[j]
j += 1
k += 1
</DeepExtract>
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
data[k] = left[i]
i += 1
else:
data[k] = right[j]
j += 1
k += 1
while i < len(left):
data[k] = left[i]
i += 1
k += 1
while j < len(right):
data[k] = right[j]
j += 1
k += 1
|
def merge_sort(self, data):
if len(data) > 1:
mid = len(data) // 2
left = data[:mid]
right = data[mid:]
if len(left) > 1:
mid = len(left) // 2
left = left[:mid]
right = left[mid:]
self.merge_sort(left)
self.merge_sort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
left[k] = left[i]
i += 1
else:
left[k] = right[j]
j += 1
k += 1
while i < len(left):
left[k] = left[i]
i += 1
k += 1
while j < len(right):
left[k] = right[j]
j += 1
k += 1
if len(right) > 1:
mid = len(right) // 2
left = right[:mid]
right = right[mid:]
self.merge_sort(left)
self.merge_sort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
right[k] = left[i]
i += 1
else:
right[k] = right[j]
j += 1
k += 1
while i < len(left):
right[k] = left[i]
i += 1
k += 1
while j < len(right):
right[k] = right[j]
j += 1
k += 1
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
data[k] = left[i]
i += 1
else:
data[k] = right[j]
j += 1
k += 1
while i < len(left):
data[k] = left[i]
i += 1
k += 1
while j < len(right):
data[k] = right[j]
j += 1
k += 1
|
data-structure-and-algorithms
|
positive
|
def read(context):
<DeepExtract>
parsed_dacl = []
context.log.debug('Parsing DACL')
i = 0
for ace in self.principal_security_descriptor['Dacl']['Data']:
parsed_ace = self.parseACE(context, ace)
parsed_dacl.append(parsed_ace)
i += 1
parsed_dacl = parsed_dacl
</DeepExtract>
<DeepExtract>
context.log.debug('Printing parsed DACL')
i = 0
for parsed_ace in parsed_dacl:
print_ace = True
if self.rights is not None:
try:
if self.rights == 'FullControl' and self.rights not in parsed_ace['Access mask']:
print_ace = False
if self.rights == 'DCSync' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.DS_Replication_Get_Changes_All.value not in parsed_ace['Object type (GUID)']):
print_ace = False
if self.rights == 'WriteMembers' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.WriteMembers.value not in parsed_ace['Object type (GUID)']):
print_ace = False
if self.rights == 'ResetPassword' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.ResetPassword.value not in parsed_ace['Object type (GUID)']):
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.rights_guid is not None:
try:
if 'Object type (GUID)' not in parsed_ace or self.rights_guid not in parsed_ace['Object type (GUID)']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.ace_type == 'allowed':
try:
if 'ACCESS_ALLOWED_OBJECT_ACE' not in parsed_ace['ACE Type'] and 'ACCESS_ALLOWED_ACE' not in parsed_ace['ACE Type']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
else:
try:
if 'ACCESS_DENIED_OBJECT_ACE' not in parsed_ace['ACE Type'] and 'ACCESS_DENIED_ACE' not in parsed_ace['ACE Type']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.principal_SID is not None:
try:
if self.principal_SID not in parsed_ace['Trustee (SID)']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if print_ace:
print('[*] %-28s' % 'ACE[%d] info' % i)
self.printparsedACE(parsed_ace)
i += 1
</DeepExtract>
return
|
def read(context):
parsed_dacl = []
context.log.debug('Parsing DACL')
i = 0
for ace in self.principal_security_descriptor['Dacl']['Data']:
parsed_ace = self.parseACE(context, ace)
parsed_dacl.append(parsed_ace)
i += 1
parsed_dacl = parsed_dacl
context.log.debug('Printing parsed DACL')
i = 0
for parsed_ace in parsed_dacl:
print_ace = True
if self.rights is not None:
try:
if self.rights == 'FullControl' and self.rights not in parsed_ace['Access mask']:
print_ace = False
if self.rights == 'DCSync' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.DS_Replication_Get_Changes_All.value not in parsed_ace['Object type (GUID)']):
print_ace = False
if self.rights == 'WriteMembers' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.WriteMembers.value not in parsed_ace['Object type (GUID)']):
print_ace = False
if self.rights == 'ResetPassword' and ('Object type (GUID)' not in parsed_ace or RIGHTS_GUID.ResetPassword.value not in parsed_ace['Object type (GUID)']):
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.rights_guid is not None:
try:
if 'Object type (GUID)' not in parsed_ace or self.rights_guid not in parsed_ace['Object type (GUID)']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.ace_type == 'allowed':
try:
if 'ACCESS_ALLOWED_OBJECT_ACE' not in parsed_ace['ACE Type'] and 'ACCESS_ALLOWED_ACE' not in parsed_ace['ACE Type']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
else:
try:
if 'ACCESS_DENIED_OBJECT_ACE' not in parsed_ace['ACE Type'] and 'ACCESS_DENIED_ACE' not in parsed_ace['ACE Type']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if self.principal_SID is not None:
try:
if self.principal_SID not in parsed_ace['Trustee (SID)']:
print_ace = False
except Exception as e:
context.log.error('Error filtering ACE, probably because of ACE type unsupported for parsing yet (%s)' % e)
if print_ace:
print('[*] %-28s' % 'ACE[%d] info' % i)
self.printparsedACE(parsed_ace)
i += 1
return
|
CrackMapExec
|
positive
|
def predict(self, x_test):
"""
Predictive mean and variance at x_test.
Args:
x_test (Tensor): [N, *], test input
"""
<DeepExtract>
output = self.model(x_test)
y = output
</DeepExtract>
y_pred_mean = y.mean(0)
EyyT = (y ** 2).mean(0)
EyEyT = y_pred_mean ** 2
beta_inv = -self.c0 * self.dt ** self.k0
y_pred_var = beta_inv.mean() + EyyT - EyEyT
return (y_pred_mean, y_pred_var)
|
def predict(self, x_test):
"""
Predictive mean and variance at x_test.
Args:
x_test (Tensor): [N, *], test input
"""
output = self.model(x_test)
y = output
y_pred_mean = y.mean(0)
EyyT = (y ** 2).mean(0)
EyEyT = y_pred_mean ** 2
beta_inv = -self.c0 * self.dt ** self.k0
y_pred_var = beta_inv.mean() + EyyT - EyEyT
return (y_pred_mean, y_pred_var)
|
ar-pde-cnn
|
positive
|
@pytest.mark.parametrize('explanation', ['average'], indirect=True)
def test__plot_two_pd_num_num(explanation):
""" Test the `_plot_two_pd_num_num` function. """
(feature, target_idx) = (4, 0)
(_, ax) = plt.subplots()
(ax, _) = _plot_two_pd_num_num(exp=explanation, feature=feature, target_idx=target_idx, ax=ax)
assert np.allclose(ax.get_xlim(), explanation.data['feature_values'][feature][0])
assert np.allclose(ax.get_ylim(), explanation.data['feature_values'][feature][1])
<DeepExtract>
if ax.collections[-2].get_segments() is not None and explanation.data['feature_deciles'][feature][0] is not None:
xdeciles = np.array([segment[0, 0] for segment in ax.collections[-2].get_segments()])
assert np.allclose(xdeciles, explanation.data['feature_deciles'][feature][0][1:-1])
if ax.collections[-1].get_segments() is not None and explanation.data['feature_deciles'][feature][1] is not None:
ydeciles = np.array([segment[0, 1] for segment in ax.collections[-1].get_segments()])
assert np.allclose(ydeciles, explanation.data['feature_deciles'][feature][1][1:-1])
</DeepExtract>
assert ax.get_xlabel() == explanation.data['feature_names'][feature][0]
assert ax.get_ylabel() == explanation.data['feature_names'][feature][1]
|
@pytest.mark.parametrize('explanation', ['average'], indirect=True)
def test__plot_two_pd_num_num(explanation):
""" Test the `_plot_two_pd_num_num` function. """
(feature, target_idx) = (4, 0)
(_, ax) = plt.subplots()
(ax, _) = _plot_two_pd_num_num(exp=explanation, feature=feature, target_idx=target_idx, ax=ax)
assert np.allclose(ax.get_xlim(), explanation.data['feature_values'][feature][0])
assert np.allclose(ax.get_ylim(), explanation.data['feature_values'][feature][1])
if ax.collections[-2].get_segments() is not None and explanation.data['feature_deciles'][feature][0] is not None:
xdeciles = np.array([segment[0, 0] for segment in ax.collections[-2].get_segments()])
assert np.allclose(xdeciles, explanation.data['feature_deciles'][feature][0][1:-1])
if ax.collections[-1].get_segments() is not None and explanation.data['feature_deciles'][feature][1] is not None:
ydeciles = np.array([segment[0, 1] for segment in ax.collections[-1].get_segments()])
assert np.allclose(ydeciles, explanation.data['feature_deciles'][feature][1][1:-1])
assert ax.get_xlabel() == explanation.data['feature_names'][feature][0]
assert ax.get_ylabel() == explanation.data['feature_names'][feature][1]
|
alibi
|
positive
|
def sample(self, sample_type=0, broadcast_message=None, **kwargs):
"""
Overrides base method by employing single underlying stochastic process to generate two tragectories
Args:
sample_type: bool, train/test
**kwargs:
Returns:
sample as PairWaveModelGenerator instance
"""
if self.metadata['type'] is not None:
if self.metadata['type'] != sample_type:
self.log.warning('Attempt to sample type {} given current sample type {}, overriden.'.format(sample_type, self.metadata['type']))
sample_type = self.metadata['type']
sample = PairWaveModelGenerator(data_names=self.data_names, generator_parameters_config=self.generator_parameters_config, data_class_ref=self.data_class_ref, name='sub_' + self.name, _top_level=False, **self.nested_kwargs)
sample.names = self.names
if self.get_new_sample:
params = self.generator_parameters_fn(**self.generator_parameters_config)
<DeepExtract>
data = self.generator_fn(num_points=self.data[self.a1_name].episode_num_records, **params)
p1_dict = {'mean': data[0, 0, :], 'maximum': data[0, 1, :], 'minimum': data[0, 2, :], 'last': data[0, 3, :]}
p2_dict = {'mean': data[1, 0, :], 'maximum': data[1, 1, :], 'minimum': data[1, 2, :], 'last': data[1, 3, :]}
if sample_type:
index = self.data[self.a1_name].test_index
else:
index = self.data[self.a1_name].train_index
df1 = pd.DataFrame(data={name: p1_dict[self.columns_map[name]] for name in self.names}, index=index)
df2 = pd.DataFrame(data={name: p2_dict[self.columns_map[name]] for name in self.names}, index=index)
(data1, data2) = (df1, df2)
</DeepExtract>
metadata = {'generator': params}
else:
data1 = None
data2 = None
metadata = {}
metadata.update({'type': sample_type, 'sample_num': self.sample_num, 'parent_sample_type': self.metadata['type'], 'parent_sample_num': self.sample_num, 'first_row': 0, 'last_row': self.data[self.a1_name].episode_num_records})
sample.metadata = copy.deepcopy(metadata)
sample.data[self.a1_name].data = data1
sample.data[self.a2_name].data = data2
sample.filename = {key: stream.filename for (key, stream) in self.data.items()}
self.sample_num += 1
return sample
|
def sample(self, sample_type=0, broadcast_message=None, **kwargs):
"""
Overrides base method by employing a single underlying stochastic process to generate two trajectories
Args:
sample_type: bool, train/test
**kwargs:
Returns:
sample as PairWaveModelGenerator instance
"""
if self.metadata['type'] is not None:
if self.metadata['type'] != sample_type:
self.log.warning('Attempt to sample type {} given current sample type {}, overridden.'.format(sample_type, self.metadata['type']))
sample_type = self.metadata['type']
sample = PairWaveModelGenerator(data_names=self.data_names, generator_parameters_config=self.generator_parameters_config, data_class_ref=self.data_class_ref, name='sub_' + self.name, _top_level=False, **self.nested_kwargs)
sample.names = self.names
if self.get_new_sample:
params = self.generator_parameters_fn(**self.generator_parameters_config)
data = self.generator_fn(num_points=self.data[self.a1_name].episode_num_records, **params)
p1_dict = {'mean': data[0, 0, :], 'maximum': data[0, 1, :], 'minimum': data[0, 2, :], 'last': data[0, 3, :]}
p2_dict = {'mean': data[1, 0, :], 'maximum': data[1, 1, :], 'minimum': data[1, 2, :], 'last': data[1, 3, :]}
if sample_type:
index = self.data[self.a1_name].test_index
else:
index = self.data[self.a1_name].train_index
df1 = pd.DataFrame(data={name: p1_dict[self.columns_map[name]] for name in self.names}, index=index)
df2 = pd.DataFrame(data={name: p2_dict[self.columns_map[name]] for name in self.names}, index=index)
(data1, data2) = (df1, df2)
metadata = {'generator': params}
else:
data1 = None
data2 = None
metadata = {}
metadata.update({'type': sample_type, 'sample_num': self.sample_num, 'parent_sample_type': self.metadata['type'], 'parent_sample_num': self.sample_num, 'first_row': 0, 'last_row': self.data[self.a1_name].episode_num_records})
sample.metadata = copy.deepcopy(metadata)
sample.data[self.a1_name].data = data1
sample.data[self.a2_name].data = data2
sample.filename = {key: stream.filename for (key, stream) in self.data.items()}
self.sample_num += 1
return sample
|
btgym
|
positive
|
def correct_carries(carry_data: CarryData, player_carry_data: CarryData, player: Player, proto_game: game_pb2.Game) -> CarryData:
"""
This modifies carries to correct for certain situations at a better granularity.
"""
<DeepExtract>
hit_index = 0
previous_end_frame = 0
valid_frame_index = 0
for frame_index in range(len(player_carry_data.start_frames)):
starting_frame = player_carry_data.start_frames[frame_index]
while valid_frame_index < len(carry_data.end_frames) and carry_data.end_frames[valid_frame_index] < starting_frame:
valid_frame_index += 1
valid_start_frame = carry_data.start_frames[valid_frame_index]
if hit_index >= len(proto_game.game_stats.hits):
continue
while proto_game.game_stats.hits[hit_index].frame_number < starting_frame:
if hit_index == len(proto_game.game_stats.hits) - 1:
break
hit_index += 1
last_valid_hit = None
while hit_index >= 0 and proto_game.game_stats.hits[hit_index].player_id.id == player.id.id and (proto_game.game_stats.hits[hit_index].frame_number >= valid_start_frame) and (proto_game.game_stats.hits[hit_index].frame_number > previous_end_frame):
last_valid_hit = proto_game.game_stats.hits[hit_index]
hit_index -= 1
if last_valid_hit is not None:
hit_index += 1
if last_valid_hit is not None and last_valid_hit.frame_number < starting_frame:
player_carry_data.start_frames[frame_index] = last_valid_hit.frame_number
previous_end_frame = player_carry_data.end_frames[frame_index]
</DeepExtract>
<DeepExtract>
start_frames = player_carry_data.start_frames
end_frames = player_carry_data.end_frames
merged_start = []
merged_end = []
if len(start_frames) == 0:
return
if len(start_frames) == 1:
player_carry_data.start_frames = [player_carry_data.start_frames[0]]
player_carry_data.end_frames = [player_carry_data.end_frames[0]]
return
player_frame_index = 0
for frame_index in range(len(carry_data.end_frames)):
end_frame = carry_data.end_frames[frame_index]
start_frame = carry_data.start_frames[frame_index]
carry_end = None
if player_frame_index >= len(start_frames):
break
if start_frames[player_frame_index] > end_frame:
continue
while player_frame_index < len(start_frames) and start_frames[player_frame_index] < start_frame:
player_frame_index += 1
if player_frame_index >= len(start_frames):
break
if start_frames[player_frame_index] > end_frame:
continue
carry_start = start_frames[player_frame_index]
entered = False
while player_frame_index < len(start_frames) and end_frames[player_frame_index] < end_frame:
player_frame_index += 1
entered = True
if entered:
player_frame_index -= 1
carry_end = end_frames[player_frame_index]
if carry_end is not None:
merged_start.append(carry_start)
merged_end.append(carry_end)
player_carry_data.start_frames = merged_start
player_carry_data.end_frames = merged_end
</DeepExtract>
<DeepExtract>
start_frames = player_carry_data.start_frames
end_frames = player_carry_data.end_frames
def valid_hit_number(hit_index, carry_index):
return proto_game.game_stats.hits[hit_index].frame_number < end_frames[carry_index] and hit_index < len(proto_game.game_stats.hits) - 1
hit_index = 0
for carry_index in range(len(start_frames)):
while hit_index < len(proto_game.game_stats.hits) - 1:
if proto_game.game_stats.hits[hit_index].frame_number < start_frames[carry_index]:
hit_index += 1
continue
if proto_game.game_stats.hits[hit_index].frame_number >= end_frames[carry_index]:
break
invalid_hit = None
last_player_hit = None
while valid_hit_number(hit_index, carry_index):
if proto_game.game_stats.hits[hit_index].player_id.id != player.id.id:
if invalid_hit is None:
invalid_hit = proto_game.game_stats.hits[hit_index]
while proto_game.game_stats.hits[hit_index].player_id.id != player.id.id and valid_hit_number(hit_index, carry_index):
hit_index += 1
if proto_game.game_stats.hits[hit_index].frame_number >= end_frames[carry_index]:
player_carry_data.end_frames[carry_index] = invalid_hit.frame_number
else:
hit_index += 1
if valid_hit_number(hit_index - 1, carry_index) and proto_game.game_stats.hits[hit_index - 1].player_id.id == player.id.id:
last_player_hit = proto_game.game_stats.hits[hit_index - 1]
if last_player_hit is None:
logger.error('The player never hit the ball during the "carry"')
end_frames[carry_index] = start_frames[carry_index]
else:
most_recent_frame = max(last_player_hit.previous_hit_frame_number, start_frames[carry_index])
carry_frames = player_carry_data.carry_frames
dodge_data = carry_frames[player.name].dodge_active
dodge_data = dodge_data.loc[most_recent_frame:last_player_hit.frame_number]
has_flicked = dodge_data.where(dodge_data % 2 == 1).last_valid_index()
if has_flicked is not None:
ending = min(max(last_player_hit.frame_number + 10, end_frames[carry_index]), has_flicked + 20)
is_going_up = carry_frames.ball.pos_z[last_player_hit.frame_number:ending].is_monotonic
end_frames[carry_index] = has_flicked
if is_going_up:
player_carry_data.add_flick(carry_index, True)
else:
player_carry_data.add_flick(carry_index, False)
elif last_player_hit.frame_number > start_frames[carry_index]:
end_frames[carry_index] = last_player_hit.frame_number
if hit_index >= len(proto_game.game_stats.hits) - 1:
break
</DeepExtract>
return player_carry_data
|
def correct_carries(carry_data: CarryData, player_carry_data: CarryData, player: Player, proto_game: game_pb2.Game) -> CarryData:
"""
This modifies carries to correct for certain situations at a better granularity.
"""
hit_index = 0
previous_end_frame = 0
valid_frame_index = 0
for frame_index in range(len(player_carry_data.start_frames)):
starting_frame = player_carry_data.start_frames[frame_index]
while valid_frame_index < len(carry_data.end_frames) and carry_data.end_frames[valid_frame_index] < starting_frame:
valid_frame_index += 1
valid_start_frame = carry_data.start_frames[valid_frame_index]
if hit_index >= len(proto_game.game_stats.hits):
continue
while proto_game.game_stats.hits[hit_index].frame_number < starting_frame:
if hit_index == len(proto_game.game_stats.hits) - 1:
break
hit_index += 1
last_valid_hit = None
while hit_index >= 0 and proto_game.game_stats.hits[hit_index].player_id.id == player.id.id and (proto_game.game_stats.hits[hit_index].frame_number >= valid_start_frame) and (proto_game.game_stats.hits[hit_index].frame_number > previous_end_frame):
last_valid_hit = proto_game.game_stats.hits[hit_index]
hit_index -= 1
if last_valid_hit is not None:
hit_index += 1
if last_valid_hit is not None and last_valid_hit.frame_number < starting_frame:
player_carry_data.start_frames[frame_index] = last_valid_hit.frame_number
previous_end_frame = player_carry_data.end_frames[frame_index]
start_frames = player_carry_data.start_frames
end_frames = player_carry_data.end_frames
merged_start = []
merged_end = []
if len(start_frames) == 0:
return
if len(start_frames) == 1:
player_carry_data.start_frames = [player_carry_data.start_frames[0]]
player_carry_data.end_frames = [player_carry_data.end_frames[0]]
return
player_frame_index = 0
for frame_index in range(len(carry_data.end_frames)):
end_frame = carry_data.end_frames[frame_index]
start_frame = carry_data.start_frames[frame_index]
carry_end = None
if player_frame_index >= len(start_frames):
break
if start_frames[player_frame_index] > end_frame:
continue
while player_frame_index < len(start_frames) and start_frames[player_frame_index] < start_frame:
player_frame_index += 1
if player_frame_index >= len(start_frames):
break
if start_frames[player_frame_index] > end_frame:
continue
carry_start = start_frames[player_frame_index]
entered = False
while player_frame_index < len(start_frames) and end_frames[player_frame_index] < end_frame:
player_frame_index += 1
entered = True
if entered:
player_frame_index -= 1
carry_end = end_frames[player_frame_index]
if carry_end is not None:
merged_start.append(carry_start)
merged_end.append(carry_end)
player_carry_data.start_frames = merged_start
player_carry_data.end_frames = merged_end
start_frames = player_carry_data.start_frames
end_frames = player_carry_data.end_frames
def valid_hit_number(hit_index, carry_index):
return proto_game.game_stats.hits[hit_index].frame_number < end_frames[carry_index] and hit_index < len(proto_game.game_stats.hits) - 1
hit_index = 0
for carry_index in range(len(start_frames)):
while hit_index < len(proto_game.game_stats.hits) - 1:
if proto_game.game_stats.hits[hit_index].frame_number < start_frames[carry_index]:
hit_index += 1
continue
if proto_game.game_stats.hits[hit_index].frame_number >= end_frames[carry_index]:
break
invalid_hit = None
last_player_hit = None
while valid_hit_number(hit_index, carry_index):
if proto_game.game_stats.hits[hit_index].player_id.id != player.id.id:
if invalid_hit is None:
invalid_hit = proto_game.game_stats.hits[hit_index]
while proto_game.game_stats.hits[hit_index].player_id.id != player.id.id and valid_hit_number(hit_index, carry_index):
hit_index += 1
if proto_game.game_stats.hits[hit_index].frame_number >= end_frames[carry_index]:
player_carry_data.end_frames[carry_index] = invalid_hit.frame_number
else:
hit_index += 1
if valid_hit_number(hit_index - 1, carry_index) and proto_game.game_stats.hits[hit_index - 1].player_id.id == player.id.id:
last_player_hit = proto_game.game_stats.hits[hit_index - 1]
if last_player_hit is None:
logger.error('The player never hit the ball during the "carry"')
end_frames[carry_index] = start_frames[carry_index]
else:
most_recent_frame = max(last_player_hit.previous_hit_frame_number, start_frames[carry_index])
carry_frames = player_carry_data.carry_frames
dodge_data = carry_frames[player.name].dodge_active
dodge_data = dodge_data.loc[most_recent_frame:last_player_hit.frame_number]
has_flicked = dodge_data.where(dodge_data % 2 == 1).last_valid_index()
if has_flicked is not None:
ending = min(max(last_player_hit.frame_number + 10, end_frames[carry_index]), has_flicked + 20)
is_going_up = carry_frames.ball.pos_z[last_player_hit.frame_number:ending].is_monotonic
end_frames[carry_index] = has_flicked
if is_going_up:
player_carry_data.add_flick(carry_index, True)
else:
player_carry_data.add_flick(carry_index, False)
elif last_player_hit.frame_number > start_frames[carry_index]:
end_frames[carry_index] = last_player_hit.frame_number
if hit_index >= len(proto_game.game_stats.hits) - 1:
break
return player_carry_data
|
carball
|
positive
|
def update(self, b, x):
last_x = x[-1, :]
self.r_hat = self.beta * self.r_hat + (1 - self.beta) * last_x
(R, Z) = risk.polar_returns(-x, self.k)
<DeepExtract>
alpha = safe_div(R.shape[0] - 1, np.log(safe_div(R[:-1], R[-1])).sum())
</DeepExtract>
for i in range(self.score.shape[0]):
self.score[i] = self.score[i] * self.beta + (1 - self.beta) * np.dot(last_x, self.w[i])
cons = self.cons + [{'type': 'eq', 'fun': lambda w: np.dot(w, self.r_hat) - np.clip(0.001, 0.0, self.r_hat.max())}]
best_w = self.w[np.argmax(self.score)]
if np.allclose(b, best_w, 0.01, 0.01):
action = 'follow'
else:
action = 'pursuit'
leader = np.zeros_like(last_x)
leader[np.argmax(last_x)] = -1
self.w[0] = minimize(self.loss_tf, self.opt.optimize(leader, self.w[0]), args=(alpha, Z, last_x), constraints=self.cons, options={'maxiter': 666}, tol=1e-07, bounds=tuple(((0, 1) for _ in range(b.shape[0]))))['x']
self.w[1] = minimize(self.loss_eri, self.w[1], args=(alpha, Z, self.w[1]), constraints=cons, options={'maxiter': 666}, tol=1e-07, bounds=tuple(((0, 1) for _ in range(b.shape[0]))))['x']
if action == 'follow':
b = simplex_proj(self.w[np.argmax(self.score)])
elif action == 'pursuit':
b = simplex_proj(self.pe.optimize(self.w[np.argmax(self.score)], b))
self.log['score'] = 'tf: %.4f, mr: %.4f' % (self.score[0], self.score[1])
self.log['ERI'] = '%.8f' % risk.ERI(*risk.polar_returns(-x, self.k), b)
self.log['TCVaR'] = '%.2f' % risk.TCVaR(*risk.fit_t(np.dot(x, b)))
self.log['alpha'] = '%.2f' % alpha
self.log['CC'] = '%.2f' % np.power(b, 2).sum() ** (-1)
self.log['action'] = action
self.log['lr'] = '%.2f' % self.opt.lr
self.log['beta'] = '%.2f' % self.beta
self.log['mpc'] = '%.2f' % self.mpc
return b
|
def update(self, b, x):
last_x = x[-1, :]
self.r_hat = self.beta * self.r_hat + (1 - self.beta) * last_x
(R, Z) = risk.polar_returns(-x, self.k)
alpha = safe_div(R.shape[0] - 1, np.log(safe_div(R[:-1], R[-1])).sum())
for i in range(self.score.shape[0]):
self.score[i] = self.score[i] * self.beta + (1 - self.beta) * np.dot(last_x, self.w[i])
cons = self.cons + [{'type': 'eq', 'fun': lambda w: np.dot(w, self.r_hat) - np.clip(0.001, 0.0, self.r_hat.max())}]
best_w = self.w[np.argmax(self.score)]
if np.allclose(b, best_w, 0.01, 0.01):
action = 'follow'
else:
action = 'pursuit'
leader = np.zeros_like(last_x)
leader[np.argmax(last_x)] = -1
self.w[0] = minimize(self.loss_tf, self.opt.optimize(leader, self.w[0]), args=(alpha, Z, last_x), constraints=self.cons, options={'maxiter': 666}, tol=1e-07, bounds=tuple(((0, 1) for _ in range(b.shape[0]))))['x']
self.w[1] = minimize(self.loss_eri, self.w[1], args=(alpha, Z, self.w[1]), constraints=cons, options={'maxiter': 666}, tol=1e-07, bounds=tuple(((0, 1) for _ in range(b.shape[0]))))['x']
if action == 'follow':
b = simplex_proj(self.w[np.argmax(self.score)])
elif action == 'pursuit':
b = simplex_proj(self.pe.optimize(self.w[np.argmax(self.score)], b))
self.log['score'] = 'tf: %.4f, mr: %.4f' % (self.score[0], self.score[1])
self.log['ERI'] = '%.8f' % risk.ERI(*risk.polar_returns(-x, self.k), b)
self.log['TCVaR'] = '%.2f' % risk.TCVaR(*risk.fit_t(np.dot(x, b)))
self.log['alpha'] = '%.2f' % alpha
self.log['CC'] = '%.2f' % np.power(b, 2).sum() ** (-1)
self.log['action'] = action
self.log['lr'] = '%.2f' % self.opt.lr
self.log['beta'] = '%.2f' % self.beta
self.log['mpc'] = '%.2f' % self.mpc
return b
|
cryptotrader
|
positive
|
def __init__(self, goal_dict, game: 'Game'):
self.player_name = goal_dict['PlayerName']
self.player_team = goal_dict['PlayerTeam']
<DeepExtract>
for player in game.players:
if player.name == self.player_name:
self.player = player
</DeepExtract>
self.frame_number = goal_dict['frame']
logger.debug('Created Goal: %s', self)
|
def __init__(self, goal_dict, game: 'Game'):
self.player_name = goal_dict['PlayerName']
self.player_team = goal_dict['PlayerTeam']
for player in game.players:
if player.name == self.player_name:
self.player = player
self.frame_number = goal_dict['frame']
logger.debug('Created Goal: %s', self)
|
carball
|
positive
|
@pytest.mark.django_db
def test_tag_delete_403_if_not_authorized(client, django_user_model):
"""Tests for 403 error for tag delete if inadequate permissions."""
<DeepExtract>
tag = models.PlanTag.objects.create(tag=tag_text)
</DeepExtract>
django_user_model.objects.create_user(username='user', password='password')
client.login(username='user', password='password')
response = client.get(reverse('dfs_tag_delete', kwargs={'tag_id': tag.id}))
assert response.status_code == 403
|
@pytest.mark.django_db
def test_tag_delete_403_if_not_authorized(client, django_user_model):
"""Tests for 403 error for tag delete if inadequate permissions."""
tag = models.PlanTag.objects.create(tag=tag_text)
django_user_model.objects.create_user(username='user', password='password')
client.login(username='user', password='password')
response = client.get(reverse('dfs_tag_delete', kwargs={'tag_id': tag.id}))
assert response.status_code == 403
|
django-flexible-subscriptions
|
positive
|
def validateURL(self, url):
"""
Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool}
"""
<DeepExtract>
(proto, netloc, path, params, query, frag) = urlparse(url)
path = urlunparse(('', '', path, params, query, frag))
if ':' in netloc:
try:
(host, port) = netloc.split(':')
except ValueError:
url_parts = None
else:
host = netloc
port = ''
host = host.lower()
if not path:
path = '/'
url_parts = (proto, host, port, path)
</DeepExtract>
if url_parts is None:
return False
(proto, host, port, path) = url_parts
if proto != self.proto:
return False
if port != self.port:
return False
if '*' in host:
return False
if not self.wildcard:
if host != self.host:
return False
elif not host.endswith(self.host) and '.' + host != self.host:
return False
if path != self.path:
path_len = len(self.path)
trust_prefix = self.path[:path_len]
url_prefix = path[:path_len]
if trust_prefix != url_prefix:
return False
if '?' in self.path:
allowed = '&'
else:
allowed = '?/'
return self.path[-1] in allowed or path[path_len] in allowed
return True
|
def validateURL(self, url):
"""
Validates a URL against this trust root.
@param url: The URL to check
@type url: C{str}
@return: Whether the given URL is within this trust root.
@rtype: C{bool}
"""
(proto, netloc, path, params, query, frag) = urlparse(url)
path = urlunparse(('', '', path, params, query, frag))
if ':' in netloc:
try:
(host, port) = netloc.split(':')
except ValueError:
url_parts = None
else:
host = netloc
port = ''
host = host.lower()
if not path:
path = '/'
url_parts = (proto, host, port, path)
if url_parts is None:
return False
(proto, host, port, path) = url_parts
if proto != self.proto:
return False
if port != self.port:
return False
if '*' in host:
return False
if not self.wildcard:
if host != self.host:
return False
elif not host.endswith(self.host) and '.' + host != self.host:
return False
if path != self.path:
path_len = len(self.path)
trust_prefix = self.path[:path_len]
url_prefix = path[:path_len]
if trust_prefix != url_prefix:
return False
if '?' in self.path:
allowed = '&'
else:
allowed = '?/'
return self.path[-1] in allowed or path[path_len] in allowed
return True
|
djangopeople.net
|
positive
|
def check_version(self):
self.client_id = self.connection_props['client_id']
server_version_str = self.connection_props['server_protocol_version']
if not server_version_str:
log.warn('Server version not reported. Cannot check version compatibility.')
else:
server_version = semvar(server_version_str).version
if c.MAJOR_MINOR_VERSION != server_version[:2]:
<DeepExtract>
self.close_sim()
log.error('Server client version mismatch server@%s client@%s - closed sim' % (server_version_str, self.client_version))
sim_url = util.ensure_sim.get_latest_sim_url()
if sim_url:
answer = input("We've found a version of the sim which matches your client. Would you like to download it now? [y/n] ")
if answer.lower().strip() == 'y':
backup_dir = os.path.join(c.DEEPDRIVE_DIR, '%s-%s' % (c.SIM_PREFIX, server_version_str))
log.warn('Backing up old sim to %s', backup_dir)
shutil.move(util.ensure_sim.get_sim_path(), backup_dir)
util.ensure_sim.ensure_sim()
self.open_sim()
</DeepExtract>
return False
return True
|
def check_version(self):
self.client_id = self.connection_props['client_id']
server_version_str = self.connection_props['server_protocol_version']
if not server_version_str:
log.warn('Server version not reported. Cannot check version compatibility.')
else:
server_version = semvar(server_version_str).version
if c.MAJOR_MINOR_VERSION != server_version[:2]:
self.close_sim()
log.error('Server client version mismatch server@%s client@%s - closed sim' % (server_version_str, self.client_version))
sim_url = util.ensure_sim.get_latest_sim_url()
if sim_url:
answer = input("We've found a version of the sim which matches your client. Would you like to download it now? [y/n] ")
if answer.lower().strip() == 'y':
backup_dir = os.path.join(c.DEEPDRIVE_DIR, '%s-%s' % (c.SIM_PREFIX, server_version_str))
log.warn('Backing up old sim to %s', backup_dir)
shutil.move(util.ensure_sim.get_sim_path(), backup_dir)
util.ensure_sim.ensure_sim()
self.open_sim()
return False
return True
|
deepdrive
|
positive
|
def get_multi_batches(self, batch_size, num_batches_per_step, num_steps=None, shuffle=False, cluster=False):
batch_size_per_step = batch_size * num_batches_per_step
<DeepExtract>
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size_per_step))
if num_steps is None:
num_steps = num_batches_per_epoch
num_epochs = int(math.ceil(num_steps / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
sorted_grouped = lambda : list(grouper(sorted_idxs, batch_size_per_step))
grouped = lambda : random.sample(sorted_grouped(), num_batches_per_epoch)
else:
random_grouped = lambda : list(grouper(random_idxs, batch_size_per_step))
grouped = random_grouped
else:
raw_grouped = lambda : list(grouper(self.valid_idxs, batch_size_per_step))
grouped = raw_grouped
batch_idx_tuples = itertools.chain.from_iterable((grouped() for _ in range(num_epochs)))
for _ in range(num_steps):
batch_idxs = tuple((i for i in next(batch_idx_tuples) if i is not None))
batch_data = self.get_by_idxs(batch_idxs)
shared_batch_data = {}
for (key, val) in batch_data.items():
if key.startswith('*'):
assert self.shared is not None
shared_key = key[1:]
shared_batch_data[shared_key] = [index(self.shared[shared_key], each) for each in val]
batch_data.update(shared_batch_data)
batch_ds = DataSet(batch_data, self.data_type, shared=self.shared)
yield (batch_idxs, batch_ds)
</DeepExtract>
multi_batches = (tuple(zip(grouper(idxs, batch_size, shorten=True, num_groups=num_batches_per_step), data_set.divide(num_batches_per_step))) for (idxs, data_set) in batches)
return multi_batches
|
def get_multi_batches(self, batch_size, num_batches_per_step, num_steps=None, shuffle=False, cluster=False):
batch_size_per_step = batch_size * num_batches_per_step
num_batches_per_epoch = int(math.ceil(self.num_examples / batch_size_per_step))
if num_steps is None:
num_steps = num_batches_per_epoch
num_epochs = int(math.ceil(num_steps / num_batches_per_epoch))
if shuffle:
random_idxs = random.sample(self.valid_idxs, len(self.valid_idxs))
if cluster:
sorted_idxs = sorted(random_idxs, key=self._sort_key)
sorted_grouped = lambda : list(grouper(sorted_idxs, batch_size_per_step))
grouped = lambda : random.sample(sorted_grouped(), num_batches_per_epoch)
else:
random_grouped = lambda : list(grouper(random_idxs, batch_size_per_step))
grouped = random_grouped
else:
raw_grouped = lambda : list(grouper(self.valid_idxs, batch_size_per_step))
grouped = raw_grouped
batch_idx_tuples = itertools.chain.from_iterable((grouped() for _ in range(num_epochs)))
for _ in range(num_steps):
batch_idxs = tuple((i for i in next(batch_idx_tuples) if i is not None))
batch_data = self.get_by_idxs(batch_idxs)
shared_batch_data = {}
for (key, val) in batch_data.items():
if key.startswith('*'):
assert self.shared is not None
shared_key = key[1:]
shared_batch_data[shared_key] = [index(self.shared[shared_key], each) for each in val]
batch_data.update(shared_batch_data)
batch_ds = DataSet(batch_data, self.data_type, shared=self.shared)
yield (batch_idxs, batch_ds)
multi_batches = (tuple(zip(grouper(idxs, batch_size, shorten=True, num_groups=num_batches_per_step), data_set.divide(num_batches_per_step))) for (idxs, data_set) in batches)
return multi_batches
|
convai-bot-1337
|
positive
|
def delete_temp_dirs(checkpoint: str, dir_to_del: list, base_dir: str) -> None:
"""This function deletes the directories of the given list".
Parameters
----------
checkpoint: str
Path to a file. Used to ensure that the temporary directories we want to delete are no longer needed, and to verify that the subject and session are right.
dir_to_del: list
Names of the directories we want to delete.
base_dir: str
Path to the working directory.
"""
import shutil
from pathlib import Path
from clinica.utils.stream import cprint
<DeepExtract>
from pathlib import Path
subject_session_folder_name = Path(Path(checkpoint).parent).parent.name
</DeepExtract>
for a in dir_to_del:
for z in Path(base_dir).rglob(f'*{a}*'):
if Path(z).parent.name == subject_session_folder_name:
shutil.rmtree(z)
cprint(msg=f'Temporary folder {z} deleted', lvl='info')
|
def delete_temp_dirs(checkpoint: str, dir_to_del: list, base_dir: str) -> None:
"""This function deletes the directories of the given list".
Parameters
----------
checkpoint: str
Path to a file. Used to ensure that the temporary directories we want to delete are no longer needed, and to verify that the subject and session are right.
dir_to_del: list
Names of the directories we want to delete.
base_dir: str
Path to the working directory.
"""
import shutil
from pathlib import Path
from clinica.utils.stream import cprint
from pathlib import Path
subject_session_folder_name = Path(Path(checkpoint).parent).parent.name
for a in dir_to_del:
for z in Path(base_dir).rglob(f'*{a}*'):
if Path(z).parent.name == subject_session_folder_name:
shutil.rmtree(z)
cprint(msg=f'Temporary folder {z} deleted', lvl='info')
|
clinica
|
positive
|
def enum_to_string(attr, val):
<DeepExtract>
attr_name = attr.name
if attr_name.startswith('User'):
attr_name = attr_name[4:]
attr_enum = getattr(ua, attr_name)
</DeepExtract>
string = ', '.join([e.name for e in attr_enum.parse_bitfield(val)])
return string
|
def enum_to_string(attr, val):
attr_name = attr.name
if attr_name.startswith('User'):
attr_name = attr_name[4:]
attr_enum = getattr(ua, attr_name)
string = ', '.join([e.name for e in attr_enum.parse_bitfield(val)])
return string
|
Converter-for-OPCUA
|
positive
|
def mujoco_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
<DeepExtract>
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
</DeepExtract>
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
def mujoco_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
return parser
|
deepdrive
|
positive
|
def NewClearDisplay(self, pos, time):
<DeepExtract>
if pos == 0:
self.i2c.writeReg(0, 128 | 0)
elif pos == 1:
self.i2c.writeReg(0, 128 | 32 | 0)
</DeepExtract>
<DeepExtract>
for c in ' ':
self.i2c.writeReg(64, ord(c))
time.sleep(time / 200.0)
</DeepExtract>
|
def NewClearDisplay(self, pos, time):
if pos == 0:
self.i2c.writeReg(0, 128 | 0)
elif pos == 1:
self.i2c.writeReg(0, 128 | 32 | 0)
for c in ' ':
self.i2c.writeReg(64, ord(c))
time.sleep(time / 200.0)
|
akilib
|
positive
|
def show(self, scrollToTop=True):
"""Show application view."""
utils.containerRemoveAll(self.box)
self.itemDict.clear()
utils.containerRemoveAll(self.eventbox)
self.eventbox.add(self.box)
if self.appNum != 0:
appList = self.getListFunc((self.pageIndex - 1) * self.defaultRows, min(self.pageIndex * self.defaultRows, self.appNum))
self.box.pack_start(self.createAppList(appList))
<DeepExtract>
paddingX = 5
paddingY = 10
if self.appNum > self.defaultRows * self.pageSize:
box = gtk.HBox()
align = gtk.Alignment()
align.set(0.5, 1.0, 0.0, 0.0)
align.set_padding(paddingY, paddingY, paddingX, paddingX)
align.add(box)
if self.pageIndex % self.pageSize == 0:
(startIndex, endIndex) = (max(1, (self.pageIndex - 1) / self.pageSize * self.pageSize + 1), min(self.pageIndex + 1, self.maxPageIndex + 1))
else:
(startIndex, endIndex) = (int(self.pageIndex / self.pageSize) * self.pageSize + 1, min((int(self.pageIndex / self.pageSize) + 1) * self.pageSize + 1, self.maxPageIndex + 1))
if startIndex != 1:
prevButton = setHoverButton(appTheme.getDynamicPixbuf('index/backward_normal.png'), appTheme.getDynamicPixbuf('index/backward_hover.png'))
prevButton.connect('button-press-event', lambda widget, event: self.jumpPage(max(1, (self.pageIndex - 1) / self.pageSize * self.pageSize)))
prevAlign = gtk.Alignment()
prevAlign.set(0.5, 0.5, 0.0, 0.0)
prevAlign.add(prevButton)
box.pack_start(prevAlign, False, False, paddingX)
firstBox = self.createNumIcon(1)
firstLabel = gtk.Label()
firstLabel.set_markup("<span foreground='%s' size='%s'> ... </span>" % (appTheme.getDynamicColor('index').getColor(), LABEL_FONT_MEDIUM_SIZE))
box.pack_start(firstBox)
box.pack_start(firstLabel)
for i in range(startIndex, endIndex):
box.pack_start(self.createNumIcon(i), False, False, paddingX)
if endIndex - 1 != self.maxPageIndex:
lastBox = self.createNumIcon(self.maxPageIndex)
lastLabel = gtk.Label()
lastLabel.set_markup("<span foreground='%s' size='%s'> ... </span>" % (appTheme.getDynamicColor('index').getColor(), LABEL_FONT_MEDIUM_SIZE))
box.pack_start(lastLabel)
box.pack_start(lastBox)
nextButton = setHoverButton(appTheme.getDynamicPixbuf('index/forward_normal.png'), appTheme.getDynamicPixbuf('index/forward_hover.png'))
nextButton.connect('button-press-event', lambda widget, event: self.jumpPage(min(self.maxPageIndex, ((self.pageIndex - 1) / self.pageSize + 1) * self.pageSize + 1)))
nextAlign = gtk.Alignment()
nextAlign.set(0.5, 0.5, 0.0, 0.0)
nextAlign.add(nextButton)
box.pack_start(nextAlign, False, False, paddingX)
spinButton = gtk.SpinButton()
spinButton.set_digits(0)
spinButton.set_increments(1, self.defaultRows)
spinButton.set_range(1, self.maxPageIndex)
spinButton.set_value(self.pageIndex)
self.jumpButton = spinButton
jumpBeforeLabel = gtk.Label()
jumpBeforeLabel.set_markup("<span size='%s'>%s</span>" % (LABEL_FONT_MEDIUM_SIZE, __('Jump To')))
jumpAfterLabel = gtk.Label()
jumpAfterLabel.set_markup("<span size='%s'>%s</span>" % (LABEL_FONT_MEDIUM_SIZE, __('Page')))
jumpButton = utils.newButtonWithoutPadding()
jumpButton.connect('button-release-event', lambda widget, event: self.jumpPage(int(self.jumpButton.get_text())))
drawButton(jumpButton, 'confirm', 'index', False, __('Jump'), BUTTON_FONT_SIZE_SMALL, 'buttonFont')
box.pack_start(jumpBeforeLabel, False, False, paddingX)
box.pack_start(spinButton, False, False, paddingX)
box.pack_start(jumpAfterLabel, False, False, paddingX)
box.pack_start(jumpButton, False, False, paddingX)
indexbar = align
elif self.appNum > self.defaultRows:
box = gtk.HBox()
align = gtk.Alignment()
align.set(0.5, 1.0, 0.0, 0.0)
align.set_padding(paddingY, paddingY, paddingX, paddingX)
align.add(box)
for i in range(1, self.maxPageIndex + 1):
box.pack_start(self.createNumIcon(i), False, False, paddingX)
indexbar = align
else:
indexbar = None
</DeepExtract>
if not indexbar == None:
self.box.pack_start(indexbar)
self.box.show_all()
self.fetchVoteCallback(self.pageId, map(lambda appInfo: utils.getPkgName(appInfo.pkg), appList), self.isSearchPage)
if scrollToTop:
utils.scrollToTop(self.scrolledwindow)
|
def show(self, scrollToTop=True):
"""Show application view."""
utils.containerRemoveAll(self.box)
self.itemDict.clear()
utils.containerRemoveAll(self.eventbox)
self.eventbox.add(self.box)
if self.appNum != 0:
appList = self.getListFunc((self.pageIndex - 1) * self.defaultRows, min(self.pageIndex * self.defaultRows, self.appNum))
self.box.pack_start(self.createAppList(appList))
paddingX = 5
paddingY = 10
if self.appNum > self.defaultRows * self.pageSize:
box = gtk.HBox()
align = gtk.Alignment()
align.set(0.5, 1.0, 0.0, 0.0)
align.set_padding(paddingY, paddingY, paddingX, paddingX)
align.add(box)
if self.pageIndex % self.pageSize == 0:
(startIndex, endIndex) = (max(1, (self.pageIndex - 1) / self.pageSize * self.pageSize + 1), min(self.pageIndex + 1, self.maxPageIndex + 1))
else:
(startIndex, endIndex) = (int(self.pageIndex / self.pageSize) * self.pageSize + 1, min((int(self.pageIndex / self.pageSize) + 1) * self.pageSize + 1, self.maxPageIndex + 1))
if startIndex != 1:
prevButton = setHoverButton(appTheme.getDynamicPixbuf('index/backward_normal.png'), appTheme.getDynamicPixbuf('index/backward_hover.png'))
prevButton.connect('button-press-event', lambda widget, event: self.jumpPage(max(1, (self.pageIndex - 1) / self.pageSize * self.pageSize)))
prevAlign = gtk.Alignment()
prevAlign.set(0.5, 0.5, 0.0, 0.0)
prevAlign.add(prevButton)
box.pack_start(prevAlign, False, False, paddingX)
firstBox = self.createNumIcon(1)
firstLabel = gtk.Label()
firstLabel.set_markup("<span foreground='%s' size='%s'> ... </span>" % (appTheme.getDynamicColor('index').getColor(), LABEL_FONT_MEDIUM_SIZE))
box.pack_start(firstBox)
box.pack_start(firstLabel)
for i in range(startIndex, endIndex):
box.pack_start(self.createNumIcon(i), False, False, paddingX)
if endIndex - 1 != self.maxPageIndex:
lastBox = self.createNumIcon(self.maxPageIndex)
lastLabel = gtk.Label()
lastLabel.set_markup("<span foreground='%s' size='%s'> ... </span>" % (appTheme.getDynamicColor('index').getColor(), LABEL_FONT_MEDIUM_SIZE))
box.pack_start(lastLabel)
box.pack_start(lastBox)
nextButton = setHoverButton(appTheme.getDynamicPixbuf('index/forward_normal.png'), appTheme.getDynamicPixbuf('index/forward_hover.png'))
nextButton.connect('button-press-event', lambda widget, event: self.jumpPage(min(self.maxPageIndex, ((self.pageIndex - 1) / self.pageSize + 1) * self.pageSize + 1)))
nextAlign = gtk.Alignment()
nextAlign.set(0.5, 0.5, 0.0, 0.0)
nextAlign.add(nextButton)
box.pack_start(nextAlign, False, False, paddingX)
spinButton = gtk.SpinButton()
spinButton.set_digits(0)
spinButton.set_increments(1, self.defaultRows)
spinButton.set_range(1, self.maxPageIndex)
spinButton.set_value(self.pageIndex)
self.jumpButton = spinButton
jumpBeforeLabel = gtk.Label()
jumpBeforeLabel.set_markup("<span size='%s'>%s</span>" % (LABEL_FONT_MEDIUM_SIZE, __('Jump To')))
jumpAfterLabel = gtk.Label()
jumpAfterLabel.set_markup("<span size='%s'>%s</span>" % (LABEL_FONT_MEDIUM_SIZE, __('Page')))
jumpButton = utils.newButtonWithoutPadding()
jumpButton.connect('button-release-event', lambda widget, event: self.jumpPage(int(self.jumpButton.get_text())))
drawButton(jumpButton, 'confirm', 'index', False, __('Jump'), BUTTON_FONT_SIZE_SMALL, 'buttonFont')
box.pack_start(jumpBeforeLabel, False, False, paddingX)
box.pack_start(spinButton, False, False, paddingX)
box.pack_start(jumpAfterLabel, False, False, paddingX)
box.pack_start(jumpButton, False, False, paddingX)
indexbar = align
elif self.appNum > self.defaultRows:
box = gtk.HBox()
align = gtk.Alignment()
align.set(0.5, 1.0, 0.0, 0.0)
align.set_padding(paddingY, paddingY, paddingX, paddingX)
align.add(box)
for i in range(1, self.maxPageIndex + 1):
box.pack_start(self.createNumIcon(i), False, False, paddingX)
indexbar = align
else:
indexbar = None
if not indexbar == None:
self.box.pack_start(indexbar)
self.box.show_all()
self.fetchVoteCallback(self.pageId, map(lambda appInfo: utils.getPkgName(appInfo.pkg), appList), self.isSearchPage)
if scrollToTop:
utils.scrollToTop(self.scrolledwindow)
|
deepin-software-center
|
positive
|
def run(self, interactive=False, jupyter=False, host_port=None, command=None, check_run=False, num_eval_batches=None, skip_benign=None, skip_attack=None, skip_misclassified=None, validate_config=None) -> int:
exit_code = 0
if self.no_docker:
if jupyter or interactive or command:
raise ValueError('jupyter, interactive, or bash commands only supported when running Docker containers.')
runner = self.manager.start_armory_instance(envs=self.extra_env_vars)
try:
<DeepExtract>
log.info(bold(red('Running evaluation script')))
b64_config = self._b64_encode_config()
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
if self.no_docker:
kwargs = {}
python = sys.executable
else:
kwargs = {'user': self.get_id()}
python = 'python'
cmd = f'{python} -m armory.scenarios.main {b64_config}{options} --base64'
exit_code = runner.exec_cmd(cmd, **kwargs)
</DeepExtract>
except KeyboardInterrupt:
log.warning('Keyboard interrupt caught')
finally:
log.info('cleaning up...')
<DeepExtract>
log.info(f'deleting tmp_dir {self.tmp_dir}')
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if not isinstance(e, FileNotFoundError):
log.exception(f'Error removing tmp_dir {self.tmp_dir}')
try:
os.rmdir(self.output_dir)
log.warning(f'removed output_dir {self.output_dir} because it was empty')
except FileNotFoundError:
log.warning(f'output_dir {self.output_dir} was deleted or never created')
except OSError:
jsons = [x for x in os.listdir(self.output_dir) if x.endswith('.json')]
if len(jsons) == 1:
json = jsons[0]
else:
json = ''
output_path = os.path.join(self.output_dir, json)
log.info(f'results output written to:\n{output_path}')
</DeepExtract>
return exit_code
if check_run and (jupyter or interactive or command):
raise ValueError('check_run incompatible with interactive, jupyter, or command')
if jupyter or host_port:
if host_port:
ports = {host_port: host_port}
else:
ports = {8888: 8888}
else:
ports = None
try:
runner = self.manager.start_armory_instance(envs=self.extra_env_vars, ports=ports, user=self.get_id())
try:
if jupyter:
<DeepExtract>
if not self.root:
log.warning('Running Jupyter Lab as root inside the container.')
user_group_id = self.get_id()
port = list(ports.keys())[0]
tmp_dir = os.path.join(self.host_paths.tmp_dir, self.config['eval_id'])
os.makedirs(tmp_dir)
self.tmp_config = os.path.join(tmp_dir, 'interactive-config.json')
docker_config_path = os.path.join(paths.runtime_paths().tmp_dir, self.config['eval_id'], 'interactive-config.json')
with open(self.tmp_config, 'w') as f:
f.write(json.dumps(self.config, sort_keys=True, indent=4) + '\n')
init_options = self._constructor_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified)
lines = ['About to launch jupyter.', bold('# To connect on the command line as well, in a new terminal, run:'), bold(red(f'docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash')), '']
if 'scenario' in self.config:
lines.extend([bold('# To run, inside of a notebook:'), bold(red(f'from armory.scenarios.main import get as get_scenario\ns = get_scenario("{docker_config_path}"{init_options}).load()\ns.evaluate()')), ''])
lines.extend([bold('# To gracefully shut down container, press: Ctrl-C'), '', 'Jupyter notebook log:'])
log.info('\n'.join(lines))
runner.exec_cmd(f'jupyter lab --ip=0.0.0.0 --port {port} --no-browser', user=user_group_id, expect_sentinel=False)
</DeepExtract>
elif interactive:
<DeepExtract>
user_group_id = self.get_id()
lines = ['Container ready for interactive use.', bold('# In a new terminal, run the following to attach to the container:'), bold(red(f'docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash')), '']
if self.config.get('scenario'):
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
init_options = self._constructor_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified)
tmp_dir = os.path.join(self.host_paths.tmp_dir, self.config['eval_id'])
os.makedirs(tmp_dir)
self.tmp_config = os.path.join(tmp_dir, 'interactive-config.json')
docker_config_path = os.path.join(paths.runtime_paths().tmp_dir, self.config['eval_id'], 'interactive-config.json')
with open(self.tmp_config, 'w') as f:
f.write(json.dumps(self.config, sort_keys=True, indent=4) + '\n')
lines.extend([bold('# To run your scenario in the container:'), bold(red(f'python -m armory.scenarios.main {docker_config_path}{options}')), '', bold('# To run your scenario interactively:'), bold(red(f'python\nfrom armory.scenarios.main import get as get_scenario\ns = get_scenario("{docker_config_path}"{init_options}).load()\ns.evaluate()')), '', bold('# To gracefully shut down container, press: Ctrl-C'), ''])
log.info('\n'.join(lines))
while True:
time.sleep(1)
</DeepExtract>
elif command:
<DeepExtract>
log.info(bold(red(f'Running bash command: {command}')))
exit_code = runner.exec_cmd(command, user=self.get_id(), expect_sentinel=False)
</DeepExtract>
else:
<DeepExtract>
log.info(bold(red('Running evaluation script')))
b64_config = self._b64_encode_config()
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
if self.no_docker:
kwargs = {}
python = sys.executable
else:
kwargs = {'user': self.get_id()}
python = 'python'
cmd = f'{python} -m armory.scenarios.main {b64_config}{options} --base64'
exit_code = runner.exec_cmd(cmd, **kwargs)
</DeepExtract>
except KeyboardInterrupt:
log.warning('keyboard interrupt caught')
finally:
log.trace('Shutting down container {self.manager.instances.keys()}')
self.manager.stop_armory_instance(runner)
except requests.exceptions.RequestException as e:
log.exception('Starting instance failed.')
if str(e).endswith(f'Bind for 0.0.0.0:{host_port} failed: port is already allocated")'):
log.error(f"Port {host_port} already in use. Try a different one with '--port <port>'")
elif str(e) == '400 Client Error: Bad Request ("Unknown runtime specified nvidia")':
log.error('NVIDIA runtime failed. Either install nvidia-docker or set config "use_gpu" to false')
else:
log.error('Is Docker Daemon running?')
<DeepExtract>
log.info(f'deleting tmp_dir {self.tmp_dir}')
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if not isinstance(e, FileNotFoundError):
log.exception(f'Error removing tmp_dir {self.tmp_dir}')
try:
os.rmdir(self.output_dir)
log.warning(f'removed output_dir {self.output_dir} because it was empty')
except FileNotFoundError:
log.warning(f'output_dir {self.output_dir} was deleted or never created')
except OSError:
jsons = [x for x in os.listdir(self.output_dir) if x.endswith('.json')]
if len(jsons) == 1:
json = jsons[0]
else:
json = ''
output_path = os.path.join(self.output_dir, json)
log.info(f'results output written to:\n{output_path}')
</DeepExtract>
return exit_code
|
def run(self, interactive=False, jupyter=False, host_port=None, command=None, check_run=False, num_eval_batches=None, skip_benign=None, skip_attack=None, skip_misclassified=None, validate_config=None) -> int:
exit_code = 0
if self.no_docker:
if jupyter or interactive or command:
raise ValueError('jupyter, interactive, or bash commands only supported when running Docker containers.')
runner = self.manager.start_armory_instance(envs=self.extra_env_vars)
try:
log.info(bold(red('Running evaluation script')))
b64_config = self._b64_encode_config()
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
if self.no_docker:
kwargs = {}
python = sys.executable
else:
kwargs = {'user': self.get_id()}
python = 'python'
cmd = f'{python} -m armory.scenarios.main {b64_config}{options} --base64'
exit_code = runner.exec_cmd(cmd, **kwargs)
except KeyboardInterrupt:
log.warning('Keyboard interrupt caught')
finally:
log.info('cleaning up...')
log.info(f'deleting tmp_dir {self.tmp_dir}')
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if not isinstance(e, FileNotFoundError):
log.exception(f'Error removing tmp_dir {self.tmp_dir}')
try:
os.rmdir(self.output_dir)
log.warning(f'removed output_dir {self.output_dir} because it was empty')
except FileNotFoundError:
log.warning(f'output_dir {self.output_dir} was deleted or never created')
except OSError:
jsons = [x for x in os.listdir(self.output_dir) if x.endswith('.json')]
if len(jsons) == 1:
json = jsons[0]
else:
json = ''
output_path = os.path.join(self.output_dir, json)
log.info(f'results output written to:\n{output_path}')
return exit_code
if check_run and (jupyter or interactive or command):
raise ValueError('check_run incompatible with interactive, jupyter, or command')
if jupyter or host_port:
if host_port:
ports = {host_port: host_port}
else:
ports = {8888: 8888}
else:
ports = None
try:
runner = self.manager.start_armory_instance(envs=self.extra_env_vars, ports=ports, user=self.get_id())
try:
if jupyter:
if not self.root:
log.warning('Running Jupyter Lab as root inside the container.')
user_group_id = self.get_id()
port = list(ports.keys())[0]
tmp_dir = os.path.join(self.host_paths.tmp_dir, self.config['eval_id'])
os.makedirs(tmp_dir)
self.tmp_config = os.path.join(tmp_dir, 'interactive-config.json')
docker_config_path = os.path.join(paths.runtime_paths().tmp_dir, self.config['eval_id'], 'interactive-config.json')
with open(self.tmp_config, 'w') as f:
f.write(json.dumps(self.config, sort_keys=True, indent=4) + '\n')
init_options = self._constructor_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified)
lines = ['About to launch jupyter.', bold('# To connect on the command line as well, in a new terminal, run:'), bold(red(f'docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash')), '']
if 'scenario' in self.config:
lines.extend([bold('# To run, inside of a notebook:'), bold(red(f'from armory.scenarios.main import get as get_scenario\ns = get_scenario("{docker_config_path}"{init_options}).load()\ns.evaluate()')), ''])
lines.extend([bold('# To gracefully shut down container, press: Ctrl-C'), '', 'Jupyter notebook log:'])
log.info('\n'.join(lines))
runner.exec_cmd(f'jupyter lab --ip=0.0.0.0 --port {port} --no-browser', user=user_group_id, expect_sentinel=False)
elif interactive:
user_group_id = self.get_id()
lines = ['Container ready for interactive use.', bold('# In a new terminal, run the following to attach to the container:'), bold(red(f'docker exec -it -u {user_group_id} {runner.docker_container.short_id} bash')), '']
if self.config.get('scenario'):
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
init_options = self._constructor_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified)
tmp_dir = os.path.join(self.host_paths.tmp_dir, self.config['eval_id'])
os.makedirs(tmp_dir)
self.tmp_config = os.path.join(tmp_dir, 'interactive-config.json')
docker_config_path = os.path.join(paths.runtime_paths().tmp_dir, self.config['eval_id'], 'interactive-config.json')
with open(self.tmp_config, 'w') as f:
f.write(json.dumps(self.config, sort_keys=True, indent=4) + '\n')
lines.extend([bold('# To run your scenario in the container:'), bold(red(f'python -m armory.scenarios.main {docker_config_path}{options}')), '', bold('# To run your scenario interactively:'), bold(red(f'python\nfrom armory.scenarios.main import get as get_scenario\ns = get_scenario("{docker_config_path}"{init_options}).load()\ns.evaluate()')), '', bold('# To gracefully shut down container, press: Ctrl-C'), ''])
log.info('\n'.join(lines))
while True:
time.sleep(1)
elif command:
log.info(bold(red(f'Running bash command: {command}')))
exit_code = runner.exec_cmd(command, user=self.get_id(), expect_sentinel=False)
else:
log.info(bold(red('Running evaluation script')))
b64_config = self._b64_encode_config()
options = self._build_options(check_run=check_run, num_eval_batches=num_eval_batches, skip_benign=skip_benign, skip_attack=skip_attack, skip_misclassified=skip_misclassified, validate_config=validate_config)
if self.no_docker:
kwargs = {}
python = sys.executable
else:
kwargs = {'user': self.get_id()}
python = 'python'
cmd = f'{python} -m armory.scenarios.main {b64_config}{options} --base64'
exit_code = runner.exec_cmd(cmd, **kwargs)
except KeyboardInterrupt:
log.warning('keyboard interrupt caught')
finally:
log.trace('Shutting down container {self.manager.instances.keys()}')
self.manager.stop_armory_instance(runner)
except requests.exceptions.RequestException as e:
log.exception('Starting instance failed.')
if str(e).endswith(f'Bind for 0.0.0.0:{host_port} failed: port is already allocated")'):
log.error(f"Port {host_port} already in use. Try a different one with '--port <port>'")
elif str(e) == '400 Client Error: Bad Request ("Unknown runtime specified nvidia")':
log.error('NVIDIA runtime failed. Either install nvidia-docker or set config "use_gpu" to false')
else:
log.error('Is Docker Daemon running?')
log.info(f'deleting tmp_dir {self.tmp_dir}')
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if not isinstance(e, FileNotFoundError):
log.exception(f'Error removing tmp_dir {self.tmp_dir}')
try:
os.rmdir(self.output_dir)
log.warning(f'removed output_dir {self.output_dir} because it was empty')
except FileNotFoundError:
log.warning(f'output_dir {self.output_dir} was deleted or never created')
except OSError:
jsons = [x for x in os.listdir(self.output_dir) if x.endswith('.json')]
if len(jsons) == 1:
json = jsons[0]
else:
json = ''
output_path = os.path.join(self.output_dir, json)
log.info(f'results output written to:\n{output_path}')
return exit_code
|
armory
|
positive
|
def add_field(name, field):
if isinstance(field, REFERENCING_FIELD_TYPES):
<DeepExtract>
references[name] = RelationInfo(model_field=field, related_model=getattr(field, 'document_type', None))
</DeepExtract>
elif isinstance(field, EMBEDDING_FIELD_TYPES):
<DeepExtract>
embedded[name] = RelationInfo(model_field=field, related_model=getattr(field, 'document_type', None))
</DeepExtract>
elif isinstance(field, COMPOUND_FIELD_TYPES):
fields[name] = field
if field.field:
<DeepExtract>
if isinstance(field.field, REFERENCING_FIELD_TYPES):
references[name + '.child'] = get_relation_info(field.field)
elif isinstance(field.field, EMBEDDING_FIELD_TYPES):
embedded[name + '.child'] = get_relation_info(field.field)
elif isinstance(field.field, COMPOUND_FIELD_TYPES):
fields[name + '.child'] = field.field
if field.field.field:
add_field(name + '.child' + '.child', field.field.field)
elif field.field is pk:
return
else:
fields[name + '.child'] = field.field
</DeepExtract>
elif field is pk:
return
else:
fields[name] = field
|
def add_field(name, field):
if isinstance(field, REFERENCING_FIELD_TYPES):
references[name] = RelationInfo(model_field=field, related_model=getattr(field, 'document_type', None))
elif isinstance(field, EMBEDDING_FIELD_TYPES):
embedded[name] = RelationInfo(model_field=field, related_model=getattr(field, 'document_type', None))
elif isinstance(field, COMPOUND_FIELD_TYPES):
fields[name] = field
if field.field:
if isinstance(field.field, REFERENCING_FIELD_TYPES):
references[name + '.child'] = get_relation_info(field.field)
elif isinstance(field.field, EMBEDDING_FIELD_TYPES):
embedded[name + '.child'] = get_relation_info(field.field)
elif isinstance(field.field, COMPOUND_FIELD_TYPES):
fields[name + '.child'] = field.field
if field.field.field:
add_field(name + '.child' + '.child', field.field.field)
elif field.field is pk:
return
else:
fields[name + '.child'] = field.field
elif field is pk:
return
else:
fields[name] = field
|
django-rest-framework-mongoengine
|
positive
|
def generate_ros_distro_diff(track, repository, distro, override_release_repository_url):
def convert_unicode_dict_to_str(d):
for (key, value) in d.items():
if type(key) == unicode:
del d[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
<DeepExtract>
for (key, value) in value.items():
if type(key) == unicode:
del value[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
convert_unicode_dict_to_str(value)
value[key] = value
</DeepExtract>
d[key] = value
global _user_provided_release_url
distribution_dict = get_distribution_file(distro).get_data()
<DeepExtract>
with inbranch('upstream'):
(_, _, packages) = get_package_data('upstream')
packages = packages
</DeepExtract>
if len(packages) == 0:
warning("No packages found, will not generate 'package: path' entries for rosdistro.")
track_dict = get_tracks_dict_raw()['tracks'][track]
last_version = track_dict['last_version']
release_inc = track_dict['release_inc']
version = '{0}-{1}'.format(last_version, release_inc)
if repository not in distribution_dict['repositories']:
distribution_dict['repositories'][repository] = {}
if 'release' not in distribution_dict['repositories'][repository]:
distribution_dict['repositories'][repository]['release'] = {'url': override_release_repository_url or _user_provided_release_url}
repo = distribution_dict['repositories'][repository]['release']
if override_release_repository_url is not None:
repo['url'] = override_release_repository_url
if 'tags' not in repo:
repo['tags'] = {}
<DeepExtract>
tag = 'release/%s/{package}/{version}' % distro
if sys.version_info[0] < 3:
tag == tag.encode('utf-8')
repo['tags']['release'] = tag
</DeepExtract>
repo['version'] = version
if 'last_release' in track_dict:
repo['upstream_tag'] = track_dict['last_release']
if 'packages' not in repo:
repo['packages'] = []
for (path, pkg) in packages.items():
if pkg.name not in repo['packages']:
repo['packages'].append(pkg.name)
packages_being_released = [p.name for p in packages.values()]
for pkg_name in list(repo['packages']):
if pkg_name not in packages_being_released:
repo['packages'].remove(pkg_name)
repo['packages'].sort()
if sys.version_info[0] < 3:
<DeepExtract>
for (key, value) in repo.items():
if type(key) == unicode:
del repo[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
convert_unicode_dict_to_str(value)
repo[key] = value
</DeepExtract>
def get_repository_info_from_user(url_type, defaults=None):
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
return {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, url_type):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
return {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
return {}
data['version'] = version
return data
if 'BLOOM_DONT_ASK_FOR_DOCS' not in os.environ:
docs = distribution_dict['repositories'][repository].get('doc', {})
if not docs and maybe_continue(msg='Would you like to add documentation information for this repository?'):
defaults = None
info(fmt('@{gf}@!==> @|') + 'Looking for a doc entry for this repository in a different distribution...')
(default_distro, default_doc) = get_most_recent('doc', repository, distro)
if default_distro is None:
warning('No existing doc entries found for use as defaults.')
else:
warning("Using defaults from the doc entry of distribution '{0}'.".format(default_distro))
if default_doc is not None:
defaults = {'type': default_doc.type or None, 'url': default_doc.url or None, 'version': default_doc.version or None}
info('Please enter your repository information for the doc generation job.')
info('This information should point to the repository from which documentation should be generated.')
<DeepExtract>
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
docs = {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, 'doc'):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
docs = {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
docs = {}
data['version'] = version
docs = data
</DeepExtract>
distribution_dict['repositories'][repository]['doc'] = docs
if 'BLOOM_DONT_ASK_FOR_SOURCE' not in os.environ:
source = distribution_dict['repositories'][repository].get('source', {})
if not source and maybe_continue(msg='Would you like to add source information for this repository?'):
defaults = None
info(fmt('@{gf}@!==> @|') + 'Looking for a source entry for this repository in a different distribution...')
(default_distro, default_source) = get_most_recent('source', repository, distro)
if default_distro is None:
warning('No existing source entries found for use as defaults.')
else:
warning("Using defaults from the source entry of distribution '{0}'.".format(default_distro))
if default_source is not None:
defaults = {'type': default_source.type or None, 'url': default_source.url or None, 'version': default_source.version or None}
info('Please enter information which points to the active development branch for this repository.')
info('This information is used to run continuous integration jobs and for developers to checkout from.')
<DeepExtract>
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
source = {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, 'source'):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
source = {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
source = {}
data['version'] = version
source = data
</DeepExtract>
if validate_github_url(source['url'], 'source'):
info('Since you are on github we can add a job to run your tests on each pull request.If you would like to turn this on please see http://wiki.ros.org/buildfarm/Pull%20request%20testing for more information. There is more setup required to setup the hooks correctly. ')
if maybe_continue(msg='Would you like to turn on pull request testing?', default='n'):
source['test_pull_requests'] = 'true'
distribution_dict['repositories'][repository]['source'] = source
if 'BLOOM_DONT_ASK_FOR_MAINTENANCE_STATUS' not in os.environ:
status = distribution_dict['repositories'][repository].get('status', None)
description = distribution_dict['repositories'][repository].get('status_description', None)
if status is None and maybe_continue(msg='Would you like to add a maintenance status for this repository?'):
info('Please enter a maintenance status.')
info('Valid maintenance statuses:')
info('- developed: active development is in progress')
info('- maintained: no new development, but bug fixes and pull requests are addressed')
info('- unmaintained: looking for new maintainer, bug fixes and pull requests will not be addressed')
info('- end-of-life: should not be used, will disappear at some point')
while True:
status = safe_input('Status: ')
if status in ['developed', 'maintained', 'unmaintained', 'end-of-life']:
break
error("'{0}' is not a valid status.".format(status))
if not maybe_continue(msg='Try again'):
status = None
break
if status is not None:
info('You can also enter a status description.')
info("This is usually reserved for giving a reason when a status is 'end-of-life'.")
if description is not None:
info('Current status description: {0}'.format(description))
description_in = safe_input('Status Description [press Enter for no change]: ')
if description_in:
description = description_in
if status is not None:
distribution_dict['repositories'][repository]['status'] = status
if description is not None:
distribution_dict['repositories'][repository]['status_description'] = description
<DeepExtract>
distribution_file_url = urlparse(get_distribution_file_url(distro))
index_file_url = urlparse(get_index_url())
distro_file_name = os.path.relpath(distribution_file_url.path, os.path.commonprefix([index_file_url.path, distribution_file_url.path]))
</DeepExtract>
updated_distribution_file = DistributionFile(distro, distribution_dict)
distro_dump = yaml_from_distribution_file(updated_distribution_file)
distro_file_raw = load_url_to_file_handle(get_distribution_file_url(distro)).read().decode('utf-8')
if distro_file_raw != distro_dump:
udiff = difflib.unified_diff(distro_file_raw.splitlines(), distro_dump.splitlines(), fromfile=distro_file_name, tofile=distro_file_name)
temp_dir = tempfile.mkdtemp()
udiff_file = os.path.join(temp_dir, repository + '-' + version + '.patch')
udiff_raw = ''
info("Unified diff for the ROS distro file located at '{0}':".format(udiff_file))
for line in udiff:
if line.startswith('@@'):
udiff_raw += line
line = fmt('@{cf}' + sanitize(line))
if line.startswith('+'):
if not line.startswith('+++'):
line += '\n'
udiff_raw += line
line = fmt('@{gf}' + sanitize(line))
if line.startswith('-'):
if not line.startswith('---'):
line += '\n'
udiff_raw += line
line = fmt('@{rf}' + sanitize(line))
if line.startswith(' '):
line += '\n'
udiff_raw += line
info(line, use_prefix=False, end='')
distro_file_yaml = yaml.safe_load(distro_file_raw)
distro_yaml = yaml.safe_load(distro_dump)
if 'repositories' in distro_file_yaml:
distro_file_repos = distro_file_yaml['repositories']
for repo in distro_yaml['repositories']:
if repo == repository:
continue
if repo not in distro_file_repos or distro_file_repos[repo] != distro_yaml['repositories'][repo]:
error('This generated pull request modifies a repository entry other than the one being released.')
error('This likely occurred because the upstream rosdistro changed during this release.')
error('This pull request will abort, please re-run this command with the -p option to try again.', exit=True)
with open(udiff_file, 'w+') as f:
f.write(udiff_raw)
return updated_distribution_file
else:
warning('This release resulted in no changes to the ROS distro file...')
return None
|
def generate_ros_distro_diff(track, repository, distro, override_release_repository_url):
def convert_unicode_dict_to_str(d):
for (key, value) in d.items():
if type(key) == unicode:
del d[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
for (key, value) in value.items():
if type(key) == unicode:
del value[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
convert_unicode_dict_to_str(value)
value[key] = value
d[key] = value
global _user_provided_release_url
distribution_dict = get_distribution_file(distro).get_data()
with inbranch('upstream'):
(_, _, packages) = get_package_data('upstream')
packages = packages
if len(packages) == 0:
warning("No packages found, will not generate 'package: path' entries for rosdistro.")
track_dict = get_tracks_dict_raw()['tracks'][track]
last_version = track_dict['last_version']
release_inc = track_dict['release_inc']
version = '{0}-{1}'.format(last_version, release_inc)
if repository not in distribution_dict['repositories']:
distribution_dict['repositories'][repository] = {}
if 'release' not in distribution_dict['repositories'][repository]:
distribution_dict['repositories'][repository]['release'] = {'url': override_release_repository_url or _user_provided_release_url}
repo = distribution_dict['repositories'][repository]['release']
if override_release_repository_url is not None:
repo['url'] = override_release_repository_url
if 'tags' not in repo:
repo['tags'] = {}
tag = 'release/%s/{package}/{version}' % distro
if sys.version_info[0] < 3:
tag == tag.encode('utf-8')
repo['tags']['release'] = tag
repo['version'] = version
if 'last_release' in track_dict:
repo['upstream_tag'] = track_dict['last_release']
if 'packages' not in repo:
repo['packages'] = []
for (path, pkg) in packages.items():
if pkg.name not in repo['packages']:
repo['packages'].append(pkg.name)
packages_being_released = [p.name for p in packages.values()]
for pkg_name in list(repo['packages']):
if pkg_name not in packages_being_released:
repo['packages'].remove(pkg_name)
repo['packages'].sort()
if sys.version_info[0] < 3:
for (key, value) in repo.items():
if type(key) == unicode:
del repo[key]
key = key.encode('utf-8')
if type(value) == unicode:
value = value.encode('utf-8')
if type(value) == dict:
convert_unicode_dict_to_str(value)
repo[key] = value
def get_repository_info_from_user(url_type, defaults=None):
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
return {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, url_type):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
return {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
return {}
data['version'] = version
return data
if 'BLOOM_DONT_ASK_FOR_DOCS' not in os.environ:
docs = distribution_dict['repositories'][repository].get('doc', {})
if not docs and maybe_continue(msg='Would you like to add documentation information for this repository?'):
defaults = None
info(fmt('@{gf}@!==> @|') + 'Looking for a doc entry for this repository in a different distribution...')
(default_distro, default_doc) = get_most_recent('doc', repository, distro)
if default_distro is None:
warning('No existing doc entries found for use as defaults.')
else:
warning("Using defaults from the doc entry of distribution '{0}'.".format(default_distro))
if default_doc is not None:
defaults = {'type': default_doc.type or None, 'url': default_doc.url or None, 'version': default_doc.version or None}
info('Please enter your repository information for the doc generation job.')
info('This information should point to the repository from which documentation should be generated.')
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
docs = {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, 'doc'):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
docs = {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
docs = {}
data['version'] = version
docs = data
distribution_dict['repositories'][repository]['doc'] = docs
if 'BLOOM_DONT_ASK_FOR_SOURCE' not in os.environ:
source = distribution_dict['repositories'][repository].get('source', {})
if not source and maybe_continue(msg='Would you like to add source information for this repository?'):
defaults = None
info(fmt('@{gf}@!==> @|') + 'Looking for a source entry for this repository in a different distribution...')
(default_distro, default_source) = get_most_recent('source', repository, distro)
if default_distro is None:
warning('No existing source entries found for use as defaults.')
else:
warning("Using defaults from the source entry of distribution '{0}'.".format(default_distro))
if default_source is not None:
defaults = {'type': default_source.type or None, 'url': default_source.url or None, 'version': default_source.version or None}
info('Please enter information which points to the active development branch for this repository.')
info('This information is used to run continuous integration jobs and for developers to checkout from.')
data = {}
defaults = defaults or {}
while True:
info('VCS Type must be one of git, svn, hg, or bzr.')
default = defaults.get('type', track_dict.get('vcs_type'))
insert = '' if default is None else ' [{0}]'.format(default)
vcs_type = safe_input('VCS type{0}: '.format(insert))
if not vcs_type:
vcs_type = default
if vcs_type in ['git', 'svn', 'hg', 'bzr']:
break
error("'{0}' is not a valid vcs type.".format(vcs_type))
if not maybe_continue(msg='Try again'):
source = {}
data['type'] = vcs_type
while True:
default = defaults.get('url', track_dict.get('vcs_uri'))
insert = '' if default is None else ' [{0}]'.format(default)
url = safe_input('VCS url{0}: '.format(insert))
if not url:
url = default
if url:
if not validate_github_url(url, 'source'):
continue
break
error('Nothing entered for url.')
if not maybe_continue(msg='Try again'):
source = {}
data['url'] = url
while True:
info('VCS version must be a branch, tag, or commit, e.g. master or 0.1.0')
default = defaults.get('version', track_dict.get('devel_branch'))
insert = '' if default is None else ' [{0}]'.format(default)
version = safe_input('VCS version{0}: '.format(insert))
if not version:
version = default
if version:
break
error('Nothing entered for version.')
if not maybe_continue(msg='Try again'):
source = {}
data['version'] = version
source = data
if validate_github_url(source['url'], 'source'):
info('Since you are on github we can add a job to run your tests on each pull request.If you would like to turn this on please see http://wiki.ros.org/buildfarm/Pull%20request%20testing for more information. There is more setup required to setup the hooks correctly. ')
if maybe_continue(msg='Would you like to turn on pull request testing?', default='n'):
source['test_pull_requests'] = 'true'
distribution_dict['repositories'][repository]['source'] = source
if 'BLOOM_DONT_ASK_FOR_MAINTENANCE_STATUS' not in os.environ:
status = distribution_dict['repositories'][repository].get('status', None)
description = distribution_dict['repositories'][repository].get('status_description', None)
if status is None and maybe_continue(msg='Would you like to add a maintenance status for this repository?'):
info('Please enter a maintenance status.')
info('Valid maintenance statuses:')
info('- developed: active development is in progress')
info('- maintained: no new development, but bug fixes and pull requests are addressed')
info('- unmaintained: looking for new maintainer, bug fixes and pull requests will not be addressed')
info('- end-of-life: should not be used, will disappear at some point')
while True:
status = safe_input('Status: ')
if status in ['developed', 'maintained', 'unmaintained', 'end-of-life']:
break
error("'{0}' is not a valid status.".format(status))
if not maybe_continue(msg='Try again'):
status = None
break
if status is not None:
info('You can also enter a status description.')
info("This is usually reserved for giving a reason when a status is 'end-of-life'.")
if description is not None:
info('Current status description: {0}'.format(description))
description_in = safe_input('Status Description [press Enter for no change]: ')
if description_in:
description = description_in
if status is not None:
distribution_dict['repositories'][repository]['status'] = status
if description is not None:
distribution_dict['repositories'][repository]['status_description'] = description
distribution_file_url = urlparse(get_distribution_file_url(distro))
index_file_url = urlparse(get_index_url())
distro_file_name = os.path.relpath(distribution_file_url.path, os.path.commonprefix([index_file_url.path, distribution_file_url.path]))
updated_distribution_file = DistributionFile(distro, distribution_dict)
distro_dump = yaml_from_distribution_file(updated_distribution_file)
distro_file_raw = load_url_to_file_handle(get_distribution_file_url(distro)).read().decode('utf-8')
if distro_file_raw != distro_dump:
udiff = difflib.unified_diff(distro_file_raw.splitlines(), distro_dump.splitlines(), fromfile=distro_file_name, tofile=distro_file_name)
temp_dir = tempfile.mkdtemp()
udiff_file = os.path.join(temp_dir, repository + '-' + version + '.patch')
udiff_raw = ''
info("Unified diff for the ROS distro file located at '{0}':".format(udiff_file))
for line in udiff:
if line.startswith('@@'):
udiff_raw += line
line = fmt('@{cf}' + sanitize(line))
if line.startswith('+'):
if not line.startswith('+++'):
line += '\n'
udiff_raw += line
line = fmt('@{gf}' + sanitize(line))
if line.startswith('-'):
if not line.startswith('---'):
line += '\n'
udiff_raw += line
line = fmt('@{rf}' + sanitize(line))
if line.startswith(' '):
line += '\n'
udiff_raw += line
info(line, use_prefix=False, end='')
distro_file_yaml = yaml.safe_load(distro_file_raw)
distro_yaml = yaml.safe_load(distro_dump)
if 'repositories' in distro_file_yaml:
distro_file_repos = distro_file_yaml['repositories']
for repo in distro_yaml['repositories']:
if repo == repository:
continue
if repo not in distro_file_repos or distro_file_repos[repo] != distro_yaml['repositories'][repo]:
error('This generated pull request modifies a repository entry other than the one being released.')
error('This likely occurred because the upstream rosdistro changed during this release.')
error('This pull request will abort, please re-run this command with the -p option to try again.', exit=True)
with open(udiff_file, 'w+') as f:
f.write(udiff_raw)
return updated_distribution_file
else:
warning('This release resulted in no changes to the ROS distro file...')
return None
|
bloom
|
positive
|
def from_tf_parameters(parameters):
"""
Instantiate from tensorflow variables.
"""
<DeepExtract>
def torch_from_tf(data):
state_dict = torch.from_numpy(data.eval())
params = dict(parameters)
result = {}
sizes = []
for i in itertools.count():
resolution = 4 * 2 ** (i // 2)
tf_layername = '%dx%d/%s' % (resolution, resolution, 'Dense' if i == 0 else 'Conv' if i == 1 else 'Conv0_up' if i % 2 == 0 else 'Conv1')
pt_layername = 'layer%d' % (i + 1)
try:
weight = torch_from_tf(params['%s/weight' % tf_layername])
except KeyError:
break
if i == 0:
weight = weight.view(weight.shape[0], weight.shape[1] // 16, 4, 4).permute(1, 0, 2, 3).flip(2, 3)
sizes.append(weight.shape[0])
elif i % 2 == 0:
weight = weight.permute(2, 3, 0, 1).flip(2, 3)
else:
weight = weight.permute(3, 2, 0, 1)
sizes.append(weight.shape[1])
result['%s.conv.weight' % pt_layername] = weight
bias = torch_from_tf(params['%s/bias' % tf_layername])
result['%s.wscale.b' % pt_layername] = bias
i -= 1
resolution = 4 * 2 ** (i // 2)
tf_layername = 'ToRGB_lod0'
pt_layername = 'output_%dx%d' % (resolution, resolution)
result['%s.conv.weight' % pt_layername] = torch_from_tf(params['%s/weight' % tf_layername]).permute(3, 2, 0, 1)
result['%s.wscale.b' % pt_layername] = torch_from_tf(params['%s/bias' % tf_layername])
state_dict = result
</DeepExtract>
<DeepExtract>
sizes = []
for i in itertools.count():
pt_layername = 'layer%d' % (i + 1)
try:
weight = state_dict['%s.conv.weight' % pt_layername]
except KeyError:
break
if i == 0:
sizes.append(weight.shape[1])
if i % 2 == 0:
sizes.append(weight.shape[0])
sizes = sizes
</DeepExtract>
result = ProgressiveGenerator(sizes=sizes)
result.load_state_dict(state_dict)
return result
|
def from_tf_parameters(parameters):
"""
Instantiate from tensorflow variables.
"""
def torch_from_tf(data):
state_dict = torch.from_numpy(data.eval())
params = dict(parameters)
result = {}
sizes = []
for i in itertools.count():
resolution = 4 * 2 ** (i // 2)
tf_layername = '%dx%d/%s' % (resolution, resolution, 'Dense' if i == 0 else 'Conv' if i == 1 else 'Conv0_up' if i % 2 == 0 else 'Conv1')
pt_layername = 'layer%d' % (i + 1)
try:
weight = torch_from_tf(params['%s/weight' % tf_layername])
except KeyError:
break
if i == 0:
weight = weight.view(weight.shape[0], weight.shape[1] // 16, 4, 4).permute(1, 0, 2, 3).flip(2, 3)
sizes.append(weight.shape[0])
elif i % 2 == 0:
weight = weight.permute(2, 3, 0, 1).flip(2, 3)
else:
weight = weight.permute(3, 2, 0, 1)
sizes.append(weight.shape[1])
result['%s.conv.weight' % pt_layername] = weight
bias = torch_from_tf(params['%s/bias' % tf_layername])
result['%s.wscale.b' % pt_layername] = bias
i -= 1
resolution = 4 * 2 ** (i // 2)
tf_layername = 'ToRGB_lod0'
pt_layername = 'output_%dx%d' % (resolution, resolution)
result['%s.conv.weight' % pt_layername] = torch_from_tf(params['%s/weight' % tf_layername]).permute(3, 2, 0, 1)
result['%s.wscale.b' % pt_layername] = torch_from_tf(params['%s/bias' % tf_layername])
state_dict = result
sizes = []
for i in itertools.count():
pt_layername = 'layer%d' % (i + 1)
try:
weight = state_dict['%s.conv.weight' % pt_layername]
except KeyError:
break
if i == 0:
sizes.append(weight.shape[1])
if i % 2 == 0:
sizes.append(weight.shape[0])
sizes = sizes
result = ProgressiveGenerator(sizes=sizes)
result.load_state_dict(state_dict)
return result
|
dissect
|
positive
|
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for (k, v) in a.items():
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if type(v) is edict:
try:
<DeepExtract>
if type(a[k]) is not edict:
return
for (k, v) in a[k].items():
if k not in b[k]:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k][k])
if old_type is not type(v):
if isinstance(b[k][k], np.ndarray):
v = np.array(v, dtype=b[k][k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k][k]), type(v), k))
if type(v) is edict:
try:
_merge_a_into_b(a[k][k], b[k][k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k][k] = v
</DeepExtract>
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
|
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for (k, v) in a.items():
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k]), type(v), k))
if type(v) is edict:
try:
if type(a[k]) is not edict:
return
for (k, v) in a[k].items():
if k not in b[k]:
raise KeyError('{} is not a valid config key'.format(k))
old_type = type(b[k][k])
if old_type is not type(v):
if isinstance(b[k][k], np.ndarray):
v = np.array(v, dtype=b[k][k].dtype)
else:
raise ValueError('Type mismatch ({} vs. {}) for config key: {}'.format(type(b[k][k]), type(v), k))
if type(v) is edict:
try:
_merge_a_into_b(a[k][k], b[k][k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k][k] = v
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
|
3D_adapt_auto_driving
|
positive
|
def objfunc(x):
Frames = np.zeros((N_frame, N_pixel, N_pixel))
log_Frames = np.zeros((N_frame, N_pixel, N_pixel))
init_i = 0
for i in range(N_frame):
cur_len = np.sum(embed_mask_List[i])
log_Frames[i] = embed(x[init_i:init_i + cur_len], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Frames[i] = np.exp(log_Frames[i]) * embed_mask_List[i].reshape((N_pixel, N_pixel))
init_i += cur_len
if R_flow['alpha'] != 0.0:
cur_len = np.sum(embed_mask_List[0])
Flow_x = embed(x[init_i:init_i + 2 * cur_len - 1:2], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Flow_y = embed(x[init_i + 1:init_i + 2 * cur_len:2], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Flow = np.transpose([Flow_x.ravel(), Flow_y.ravel()]).reshape((N_pixel, N_pixel, 2))
init_i += 2 * cur_len
if stochastic_optics == True:
EpsilonList = x[init_i:init_i + N ** 2 - 1]
im_List = [image.Image(Frames[j], Prior.psize, Prior.ra, Prior.dec, rf=Obsdata_List[j].rf, source=Prior.source, mjd=Prior.mjd) for j in range(N_frame)]
scatt_im_List = [scattering_model.Scatter(im_List[j], Epsilon_Screen=so.MakeEpsilonScreenFromList(EpsilonList, N), ea_ker=ea_ker[j], sqrtQ=sqrtQ, Linearized_Approximation=True).imvec for j in range(N_frame)]
init_i += len(EpsilonList)
s1 = s2 = 0.0
if alpha_s1 != 0.0:
s1 = static_regularizer(Frames, nprior_embed_List, embed_mask_List, Prior.total_flux(), Prior.psize, entropy1, norm_reg=norm_reg, beam_size=beam_size, alpha_A=alpha_A, **kwargs) * alpha_s1
if alpha_s2 != 0.0:
s2 = static_regularizer(Frames, nprior_embed_List, embed_mask_List, Prior.total_flux(), Prior.psize, entropy2, norm_reg=norm_reg, beam_size=beam_size, alpha_A=alpha_A, **kwargs) * alpha_s2
s_dynamic = cm = flux = s_dS = s_dF = 0.0
if R_dI['alpha'] != 0.0:
s_dynamic += RdI(Frames, **R_dI) * R_dI['alpha']
if R_dt['alpha'] != 0.0:
s_dynamic += Rdt(Frames, B_dt, **R_dt) * R_dt['alpha']
if alpha_dS1 != 0.0:
s_dS += RdS(Frames, nprior_embed_List, embed_mask_List, entropy1, norm_reg, beam_size=beam_size, alpha_A=alpha_A) * alpha_dS1
if alpha_dS2 != 0.0:
s_dS += RdS(Frames, nprior_embed_List, embed_mask_List, entropy2, norm_reg, beam_size=beam_size, alpha_A=alpha_A) * alpha_dS2
if alpha_dF != 0.0:
s_dF += RdF_clip(Frames, embed_mask_List) * alpha_dF
if alpha_centroid != 0.0:
cm = centroid(Frames, coord) * alpha_centroid
if alpha_flux > 0.0:
flux = alpha_flux * movie_flux_constraint(Frames, flux_List)
if stochastic_optics == False:
if processes > 0:
chisq = np.array(pool.map(get_chisq_wrap, [[j, Frames[j].ravel()[embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]] for j in range(N_frame)]))
else:
chisq = np.array([get_chisq(j, Frames[j].ravel()[embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]) for j in range(N_frame)])
elif processes > 0:
chisq = np.array(pool.map(get_chisq_wrap, [[j, scatt_im_List[j][embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]] for j in range(N_frame)]))
else:
chisq = np.array([get_chisq(j, scatt_im_List[j][embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]) for j in range(N_frame)])
chisq = (np.sum(chisq[:, 0]) / N_frame - 1.0) * alpha_d1 + (np.sum(chisq[:, 1]) / N_frame - 1.0) * alpha_d2 + (np.sum(chisq[:, 2]) / N_frame - 1.0) * alpha_d3
if R_flow['alpha'] != 0.0:
<DeepExtract>
flow_tv = np.sum(np.array(Wrapped_Gradient(Flow[:, :, 0])) ** 2 + np.array(Wrapped_Gradient(Flow[:, :, 1])) ** 2)
</DeepExtract>
s_dynamic += flow_tv * R_flow['alpha_flow_tv']
s_dynamic += Rflow(Frames, Flow, **R_flow) * R_flow['alpha']
regterm_scattering = 0.0
if stochastic_optics == True:
chisq_epsilon = sum(EpsilonList * EpsilonList) / ((N * N - 1.0) / 2.0)
regterm_scattering = alpha_phi * (chisq_epsilon - 1.0)
return (s1 + s2 + s_dF + s_dS + s_dynamic + chisq + cm + flux + regterm_scattering) * J_factor
|
def objfunc(x):
Frames = np.zeros((N_frame, N_pixel, N_pixel))
log_Frames = np.zeros((N_frame, N_pixel, N_pixel))
init_i = 0
for i in range(N_frame):
cur_len = np.sum(embed_mask_List[i])
log_Frames[i] = embed(x[init_i:init_i + cur_len], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Frames[i] = np.exp(log_Frames[i]) * embed_mask_List[i].reshape((N_pixel, N_pixel))
init_i += cur_len
if R_flow['alpha'] != 0.0:
cur_len = np.sum(embed_mask_List[0])
Flow_x = embed(x[init_i:init_i + 2 * cur_len - 1:2], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Flow_y = embed(x[init_i + 1:init_i + 2 * cur_len:2], embed_mask_List[i]).reshape((N_pixel, N_pixel))
Flow = np.transpose([Flow_x.ravel(), Flow_y.ravel()]).reshape((N_pixel, N_pixel, 2))
init_i += 2 * cur_len
if stochastic_optics == True:
EpsilonList = x[init_i:init_i + N ** 2 - 1]
im_List = [image.Image(Frames[j], Prior.psize, Prior.ra, Prior.dec, rf=Obsdata_List[j].rf, source=Prior.source, mjd=Prior.mjd) for j in range(N_frame)]
scatt_im_List = [scattering_model.Scatter(im_List[j], Epsilon_Screen=so.MakeEpsilonScreenFromList(EpsilonList, N), ea_ker=ea_ker[j], sqrtQ=sqrtQ, Linearized_Approximation=True).imvec for j in range(N_frame)]
init_i += len(EpsilonList)
s1 = s2 = 0.0
if alpha_s1 != 0.0:
s1 = static_regularizer(Frames, nprior_embed_List, embed_mask_List, Prior.total_flux(), Prior.psize, entropy1, norm_reg=norm_reg, beam_size=beam_size, alpha_A=alpha_A, **kwargs) * alpha_s1
if alpha_s2 != 0.0:
s2 = static_regularizer(Frames, nprior_embed_List, embed_mask_List, Prior.total_flux(), Prior.psize, entropy2, norm_reg=norm_reg, beam_size=beam_size, alpha_A=alpha_A, **kwargs) * alpha_s2
s_dynamic = cm = flux = s_dS = s_dF = 0.0
if R_dI['alpha'] != 0.0:
s_dynamic += RdI(Frames, **R_dI) * R_dI['alpha']
if R_dt['alpha'] != 0.0:
s_dynamic += Rdt(Frames, B_dt, **R_dt) * R_dt['alpha']
if alpha_dS1 != 0.0:
s_dS += RdS(Frames, nprior_embed_List, embed_mask_List, entropy1, norm_reg, beam_size=beam_size, alpha_A=alpha_A) * alpha_dS1
if alpha_dS2 != 0.0:
s_dS += RdS(Frames, nprior_embed_List, embed_mask_List, entropy2, norm_reg, beam_size=beam_size, alpha_A=alpha_A) * alpha_dS2
if alpha_dF != 0.0:
s_dF += RdF_clip(Frames, embed_mask_List) * alpha_dF
if alpha_centroid != 0.0:
cm = centroid(Frames, coord) * alpha_centroid
if alpha_flux > 0.0:
flux = alpha_flux * movie_flux_constraint(Frames, flux_List)
if stochastic_optics == False:
if processes > 0:
chisq = np.array(pool.map(get_chisq_wrap, [[j, Frames[j].ravel()[embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]] for j in range(N_frame)]))
else:
chisq = np.array([get_chisq(j, Frames[j].ravel()[embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]) for j in range(N_frame)])
elif processes > 0:
chisq = np.array(pool.map(get_chisq_wrap, [[j, scatt_im_List[j][embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]] for j in range(N_frame)]))
else:
chisq = np.array([get_chisq(j, scatt_im_List[j][embed_mask_List[j]], d1, d2, d3, ttype, embed_mask_List[j]) for j in range(N_frame)])
chisq = (np.sum(chisq[:, 0]) / N_frame - 1.0) * alpha_d1 + (np.sum(chisq[:, 1]) / N_frame - 1.0) * alpha_d2 + (np.sum(chisq[:, 2]) / N_frame - 1.0) * alpha_d3
if R_flow['alpha'] != 0.0:
flow_tv = np.sum(np.array(Wrapped_Gradient(Flow[:, :, 0])) ** 2 + np.array(Wrapped_Gradient(Flow[:, :, 1])) ** 2)
s_dynamic += flow_tv * R_flow['alpha_flow_tv']
s_dynamic += Rflow(Frames, Flow, **R_flow) * R_flow['alpha']
regterm_scattering = 0.0
if stochastic_optics == True:
chisq_epsilon = sum(EpsilonList * EpsilonList) / ((N * N - 1.0) / 2.0)
regterm_scattering = alpha_phi * (chisq_epsilon - 1.0)
return (s1 + s2 + s_dF + s_dS + s_dynamic + chisq + cm + flux + regterm_scattering) * J_factor
|
eht-imaging
|
positive
|
def get_image(self) -> Tuple[float, np.ndarray]:
"""Reads a single image from the camera.
Returns:
The dict containing the metadata, and the captured image
"""
count = 0
while True:
t = time()
<DeepExtract>
self._write_data(Seek_thermal_pro_commands['Start get image transfer'], b'X[\x01\x00')
to_read = 2 * Seek_thermal_pro_dimensions['Raw width'] * Seek_thermal_pro_dimensions['Raw height']
ret = bytearray()
while to_read - len(ret) > 512:
ret += self._dev.read(endpoint=Seek_therm_usb_req['Read_img'], size_or_buffer=int(to_read / (Seek_thermal_pro_dimensions['Raw height'] / 20)), timeout=1000)
status = ret[4]
if len(ret) == to_read:
(status, img) = (status, np.frombuffer(ret, dtype=np.uint16).reshape(Seek_thermal_pro_dimensions['Raw height'], Seek_thermal_pro_dimensions['Raw width']))
else:
(status, img) = (status, None)
</DeepExtract>
if status == 1:
self._calib = self._crop(img) - 1600
elif status == 3 and self._calib is not None:
return (t, self._correct_dead_pixels(self._crop(img) - self._calib))
elif count == 5:
raise TimeoutError('Unable to read image')
count += 1
|
def get_image(self) -> Tuple[float, np.ndarray]:
"""Reads a single image from the camera.
Returns:
The dict containing the metadata, and the captured image
"""
count = 0
while True:
t = time()
self._write_data(Seek_thermal_pro_commands['Start get image transfer'], b'X[\x01\x00')
to_read = 2 * Seek_thermal_pro_dimensions['Raw width'] * Seek_thermal_pro_dimensions['Raw height']
ret = bytearray()
while to_read - len(ret) > 512:
ret += self._dev.read(endpoint=Seek_therm_usb_req['Read_img'], size_or_buffer=int(to_read / (Seek_thermal_pro_dimensions['Raw height'] / 20)), timeout=1000)
status = ret[4]
if len(ret) == to_read:
(status, img) = (status, np.frombuffer(ret, dtype=np.uint16).reshape(Seek_thermal_pro_dimensions['Raw height'], Seek_thermal_pro_dimensions['Raw width']))
else:
(status, img) = (status, None)
if status == 1:
self._calib = self._crop(img) - 1600
elif status == 3 and self._calib is not None:
return (t, self._correct_dead_pixels(self._crop(img) - self._calib))
elif count == 5:
raise TimeoutError('Unable to read image')
count += 1
|
crappy
|
positive
|
def make_image(relative_image):
absolute_image = os.path.join(settings.MEDIA_ROOT, relative_image)
<DeepExtract>
if not os.path.dirname(relative_image):
return
absolute_path = os.path.join(settings.MEDIA_ROOT, os.path.dirname(relative_image))
if os.path.isdir(absolute_path):
return
if absolute_path not in dirs_to_delete:
dirs_to_delete.append(absolute_path)
make_dirs(os.path.dirname(os.path.dirname(relative_image)))
os.mkdir(absolute_path)
</DeepExtract>
open(absolute_image, 'w').close()
images_to_delete.add(absolute_image)
|
def make_image(relative_image):
absolute_image = os.path.join(settings.MEDIA_ROOT, relative_image)
if not os.path.dirname(relative_image):
return
absolute_path = os.path.join(settings.MEDIA_ROOT, os.path.dirname(relative_image))
if os.path.isdir(absolute_path):
return
if absolute_path not in dirs_to_delete:
dirs_to_delete.append(absolute_path)
make_dirs(os.path.dirname(os.path.dirname(relative_image)))
os.mkdir(absolute_path)
open(absolute_image, 'w').close()
images_to_delete.add(absolute_image)
|
djangopeople.net
|
positive
|
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
opts = new_class._meta
if not opts.instance_loader_class:
opts.instance_loader_class = ModelInstanceLoader
if opts.model:
model_opts = opts.model._meta
declared_fields = new_class.fields
field_list = []
for f in sorted(model_opts.fields + model_opts.many_to_many):
if opts.fields is not None and (not f.name in opts.fields):
continue
if opts.exclude and f.name in opts.exclude:
continue
if f.name in declared_fields:
continue
field = new_class.field_from_django_field(f.name, f, readonly=False)
field_list.append((f.name, field))
new_class.fields.update(OrderedDict(field_list))
if opts.fields is not None:
field_list = []
for field_name in opts.fields:
if field_name in declared_fields:
continue
if field_name.find('__') == -1:
continue
model = opts.model
attrs = field_name.split('__')
for (i, attr) in enumerate(attrs):
verbose_path = '.'.join([opts.model.__name__] + attrs[0:i + 1])
try:
f = model._meta.get_field(attr)
except FieldDoesNotExist as e:
logger.debug(e, exc_info=e)
raise FieldDoesNotExist("%s: %s has no field named '%s'" % (verbose_path, model.__name__, attr))
if i < len(attrs) - 1:
if isinstance(f, ForeignObjectRel):
<DeepExtract>
if hasattr(f, 'related_model'):
model = f.related_model
</DeepExtract>
else:
if get_related_model(f) is None:
raise KeyError('%s is not a relation' % verbose_path)
<DeepExtract>
if hasattr(f, 'related_model'):
model = f.related_model
</DeepExtract>
if isinstance(f, ForeignObjectRel):
f = f.field
field = new_class.field_from_django_field(field_name, f, readonly=True)
field_list.append((field_name, field))
new_class.fields.update(OrderedDict(field_list))
return new_class
|
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
opts = new_class._meta
if not opts.instance_loader_class:
opts.instance_loader_class = ModelInstanceLoader
if opts.model:
model_opts = opts.model._meta
declared_fields = new_class.fields
field_list = []
for f in sorted(model_opts.fields + model_opts.many_to_many):
if opts.fields is not None and (not f.name in opts.fields):
continue
if opts.exclude and f.name in opts.exclude:
continue
if f.name in declared_fields:
continue
field = new_class.field_from_django_field(f.name, f, readonly=False)
field_list.append((f.name, field))
new_class.fields.update(OrderedDict(field_list))
if opts.fields is not None:
field_list = []
for field_name in opts.fields:
if field_name in declared_fields:
continue
if field_name.find('__') == -1:
continue
model = opts.model
attrs = field_name.split('__')
for (i, attr) in enumerate(attrs):
verbose_path = '.'.join([opts.model.__name__] + attrs[0:i + 1])
try:
f = model._meta.get_field(attr)
except FieldDoesNotExist as e:
logger.debug(e, exc_info=e)
raise FieldDoesNotExist("%s: %s has no field named '%s'" % (verbose_path, model.__name__, attr))
if i < len(attrs) - 1:
if isinstance(f, ForeignObjectRel):
if hasattr(f, 'related_model'):
model = f.related_model
else:
if get_related_model(f) is None:
raise KeyError('%s is not a relation' % verbose_path)
if hasattr(f, 'related_model'):
model = f.related_model
if isinstance(f, ForeignObjectRel):
f = f.field
field = new_class.field_from_django_field(field_name, f, readonly=True)
field_list.append((field_name, field))
new_class.fields.update(OrderedDict(field_list))
return new_class
|
django-import-export
|
positive
|
def test_dsm_check_good(self):
<DeepExtract>
parser = get_parser()
['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')] = parser.parse_args(['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')])
(status, msg) = ['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')].func(['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')])
</DeepExtract>
self.assertTrue(status)
|
def test_dsm_check_good(self):
parser = get_parser()
['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')] = parser.parse_args(['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')])
(status, msg) = ['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')].func(['check', '-type', 'dsm', self.get_dsm_path('DSM_PKG/MINI_DSM1')])
self.assertTrue(status)
|
ClairMeta
|
positive
|
def __build_dockmanager_menu(dockmanager):
dm_menu_items = dockmanager.get_menu_items()
if dm_menu_items:
<DeepExtract>
separator = gtk.SeparatorMenuItem()
separator.show()
if self.gtk_menu:
if submenu:
self.submenus[submenu].append(separator)
else:
self.menu.append(separator)
elif submenu:
self.submenus[submenu].add_item(separator)
else:
self.menu.pack_start(separator)
if identifier is not None:
self.items[identifier] = separator
return separator
</DeepExtract>
for (identifier, item) in dm_menu_items.items():
submenu = item.get('container-title', None)
if submenu and (not self.has_submenu(submenu)):
<DeepExtract>
if self.gtk_menu:
item = gtk.MenuItem(submenu)
item.show()
if submenu:
self.submenus[submenu].append(item)
else:
self.menu.append(item)
menu = gtk.Menu()
item.set_submenu(menu)
else:
item = None
menu = CairoToggleMenu(submenu)
if submenu:
self.submenus[submenu].add_item(menu)
else:
self.menu.pack_start(menu)
menu.show()
menu.connect('toggled', self.__on_submenu_toggled)
if identifier is not None:
self.items[identifier] = item or menu
self.submenus[identifier or submenu] = menu
return item or menu
</DeepExtract>
if item['label']:
identifier = 'dockmanager_%s' % identifier
<DeepExtract>
item['label'].set_show_preview(self.show_previews)
item['label'].update_preview()
self.window_box.pack_start(item['label'], True, True)
</DeepExtract>
|
def __build_dockmanager_menu(dockmanager):
dm_menu_items = dockmanager.get_menu_items()
if dm_menu_items:
separator = gtk.SeparatorMenuItem()
separator.show()
if self.gtk_menu:
if submenu:
self.submenus[submenu].append(separator)
else:
self.menu.append(separator)
elif submenu:
self.submenus[submenu].add_item(separator)
else:
self.menu.pack_start(separator)
if identifier is not None:
self.items[identifier] = separator
return separator
for (identifier, item) in dm_menu_items.items():
submenu = item.get('container-title', None)
if submenu and (not self.has_submenu(submenu)):
if self.gtk_menu:
item = gtk.MenuItem(submenu)
item.show()
if submenu:
self.submenus[submenu].append(item)
else:
self.menu.append(item)
menu = gtk.Menu()
item.set_submenu(menu)
else:
item = None
menu = CairoToggleMenu(submenu)
if submenu:
self.submenus[submenu].add_item(menu)
else:
self.menu.pack_start(menu)
menu.show()
menu.connect('toggled', self.__on_submenu_toggled)
if identifier is not None:
self.items[identifier] = item or menu
self.submenus[identifier or submenu] = menu
return item or menu
if item['label']:
identifier = 'dockmanager_%s' % identifier
item['label'].set_show_preview(self.show_previews)
item['label'].update_preview()
self.window_box.pack_start(item['label'], True, True)
|
dockbarx
|
positive
|
def train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, use_amp=False, prof=-1, batch_size_multiplier=1, register_metrics=True):
if register_metrics and logger is not None:
logger.register_metric('train.top1', log.AverageMeter(), log_level=0)
logger.register_metric('train.top5', log.AverageMeter(), log_level=0)
logger.register_metric('train.loss', log.AverageMeter(), log_level=0)
logger.register_metric('train.compute_ips', log.AverageMeter(), log_level=1)
logger.register_metric('train.total_ips', log.AverageMeter(), log_level=0)
logger.register_metric('train.data_time', log.AverageMeter(), log_level=1)
logger.register_metric('train.compute_time', log.AverageMeter(), log_level=1)
if config.debug_memory_model:
print('========== Model Only ===========')
usage = get_memory_usage(True)
exp_recorder.record('network', model_and_loss.arch[0])
exp_recorder.record('algorithm', 'quantize' if model_and_loss.arch[1] == 'quantize' else 'exact')
exp_recorder.record('model_only', usage / GB, 2)
<DeepExtract>
def _step(input, target, optimizer_step=True):
input_var = Variable(input)
target_var = Variable(target)
if config.debug_memory_model:
print('========== Init Data Loader ===========')
init_mem = get_memory_usage(True)
exp_recorder.record('data_loader', init_mem / GB - exp_recorder.val_dict['model_only'], 2)
(loss, output) = model_and_loss(input_var, target_var)
(prec1, prec5) = (torch.zeros(1), torch.zeros(1))
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.data)
else:
reduced_loss = loss.data
if config.debug_memory_model:
print('========== Before Backward ===========')
before_backward = get_memory_usage(True)
act_mem = get_memory_usage() - init_mem - compute_tensor_bytes([loss, output])
res = 'Batch size: %d\tTotal Mem: %.2f MB\tAct Mem: %.2f MB' % (len(output), before_backward / MB, act_mem / MB)
loss.backward()
optimizer.step()
del loss
print('========== After Backward ===========')
after_backward = get_memory_usage(True)
total_mem = before_backward + (after_backward - init_mem)
res = 'Batch size: %d\tTotal Mem: %.2f MB\tAct Mem: %.2f MB' % (len(output), total_mem / MB, act_mem / MB)
print(res)
exp_recorder.record('batch_size', len(output))
exp_recorder.record('total', total_mem / GB, 2)
exp_recorder.record('activation', act_mem / GB, 2)
exp_recorder.dump('mem_results.tsv')
exit()
if fp16:
optimizer.backward(loss)
elif use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if optimizer_step:
opt = optimizer.optimizer if isinstance(optimizer, FP16_Optimizer) else optimizer
for param_group in opt.param_groups:
for param in param_group['params']:
param.grad /= batch_size_multiplier
optimizer.step()
optimizer.zero_grad()
torch.cuda.synchronize()
step = (reduced_loss, output, prec1, prec5)
step = _step
</DeepExtract>
model_and_loss.train()
print('Training mode ', config.training)
end = time.time()
optimizer.zero_grad()
data_iter = enumerate(train_loader)
if logger is not None:
data_iter = logger.iteration_generator_wrapper(data_iter)
for (i, (input, target, index)) in data_iter:
QScheme.batch = index
bs = input.size(0)
lr_scheduler(optimizer, i, epoch)
data_time = time.time() - end
if prof > 0:
if i >= prof:
break
optimizer_step = (i + 1) % batch_size_multiplier == 0
(loss, _, prec1, prec5) = step(input, target, optimizer_step=optimizer_step)
it_time = time.time() - end
if config.debug_speed:
global train_step_ct, train_max_ips, train_max_batch
train_max_ips = max(train_max_ips, calc_ips(bs, it_time))
train_max_batch = max(train_max_batch, len(input))
if train_step_ct >= 3:
res = 'BatchSize: %d\tIPS: %.2f\t,Cost: %.2f ms' % (bs, train_max_ips, 1000.0 / train_max_ips)
print(res, flush=True)
exp_recorder.record('network', model_and_loss.arch[0])
exp_recorder.record('algorithm', 'quantize' if model_and_loss.arch[1] == 'quantize' else 'exact')
exp_recorder.record('batch_size', train_max_batch)
exp_recorder.record('ips', train_max_ips, 1)
exp_recorder.dump('speed_results.tsv')
exit(0)
train_step_ct += 1
if logger is not None:
logger.log_metric('train.top1', to_python_float(prec1))
logger.log_metric('train.top5', to_python_float(prec5))
logger.log_metric('train.loss', to_python_float(loss))
logger.log_metric('train.compute_ips', calc_ips(bs, it_time - data_time))
logger.log_metric('train.total_ips', calc_ips(bs, it_time))
logger.log_metric('train.data_time', data_time)
logger.log_metric('train.compute_time', it_time - data_time)
end = time.time()
|
def train(train_loader, model_and_loss, optimizer, lr_scheduler, fp16, logger, epoch, use_amp=False, prof=-1, batch_size_multiplier=1, register_metrics=True):
if register_metrics and logger is not None:
logger.register_metric('train.top1', log.AverageMeter(), log_level=0)
logger.register_metric('train.top5', log.AverageMeter(), log_level=0)
logger.register_metric('train.loss', log.AverageMeter(), log_level=0)
logger.register_metric('train.compute_ips', log.AverageMeter(), log_level=1)
logger.register_metric('train.total_ips', log.AverageMeter(), log_level=0)
logger.register_metric('train.data_time', log.AverageMeter(), log_level=1)
logger.register_metric('train.compute_time', log.AverageMeter(), log_level=1)
if config.debug_memory_model:
print('========== Model Only ===========')
usage = get_memory_usage(True)
exp_recorder.record('network', model_and_loss.arch[0])
exp_recorder.record('algorithm', 'quantize' if model_and_loss.arch[1] == 'quantize' else 'exact')
exp_recorder.record('model_only', usage / GB, 2)
def _step(input, target, optimizer_step=True):
input_var = Variable(input)
target_var = Variable(target)
if config.debug_memory_model:
print('========== Init Data Loader ===========')
init_mem = get_memory_usage(True)
exp_recorder.record('data_loader', init_mem / GB - exp_recorder.val_dict['model_only'], 2)
(loss, output) = model_and_loss(input_var, target_var)
(prec1, prec5) = (torch.zeros(1), torch.zeros(1))
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.data)
else:
reduced_loss = loss.data
if config.debug_memory_model:
print('========== Before Backward ===========')
before_backward = get_memory_usage(True)
act_mem = get_memory_usage() - init_mem - compute_tensor_bytes([loss, output])
res = 'Batch size: %d\tTotal Mem: %.2f MB\tAct Mem: %.2f MB' % (len(output), before_backward / MB, act_mem / MB)
loss.backward()
optimizer.step()
del loss
print('========== After Backward ===========')
after_backward = get_memory_usage(True)
total_mem = before_backward + (after_backward - init_mem)
res = 'Batch size: %d\tTotal Mem: %.2f MB\tAct Mem: %.2f MB' % (len(output), total_mem / MB, act_mem / MB)
print(res)
exp_recorder.record('batch_size', len(output))
exp_recorder.record('total', total_mem / GB, 2)
exp_recorder.record('activation', act_mem / GB, 2)
exp_recorder.dump('mem_results.tsv')
exit()
if fp16:
optimizer.backward(loss)
elif use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if optimizer_step:
opt = optimizer.optimizer if isinstance(optimizer, FP16_Optimizer) else optimizer
for param_group in opt.param_groups:
for param in param_group['params']:
param.grad /= batch_size_multiplier
optimizer.step()
optimizer.zero_grad()
torch.cuda.synchronize()
step = (reduced_loss, output, prec1, prec5)
step = _step
model_and_loss.train()
print('Training mode ', config.training)
end = time.time()
optimizer.zero_grad()
data_iter = enumerate(train_loader)
if logger is not None:
data_iter = logger.iteration_generator_wrapper(data_iter)
for (i, (input, target, index)) in data_iter:
QScheme.batch = index
bs = input.size(0)
lr_scheduler(optimizer, i, epoch)
data_time = time.time() - end
if prof > 0:
if i >= prof:
break
optimizer_step = (i + 1) % batch_size_multiplier == 0
(loss, _, prec1, prec5) = step(input, target, optimizer_step=optimizer_step)
it_time = time.time() - end
if config.debug_speed:
global train_step_ct, train_max_ips, train_max_batch
train_max_ips = max(train_max_ips, calc_ips(bs, it_time))
train_max_batch = max(train_max_batch, len(input))
if train_step_ct >= 3:
res = 'BatchSize: %d\tIPS: %.2f\t,Cost: %.2f ms' % (bs, train_max_ips, 1000.0 / train_max_ips)
print(res, flush=True)
exp_recorder.record('network', model_and_loss.arch[0])
exp_recorder.record('algorithm', 'quantize' if model_and_loss.arch[1] == 'quantize' else 'exact')
exp_recorder.record('batch_size', train_max_batch)
exp_recorder.record('ips', train_max_ips, 1)
exp_recorder.dump('speed_results.tsv')
exit(0)
train_step_ct += 1
if logger is not None:
logger.log_metric('train.top1', to_python_float(prec1))
logger.log_metric('train.top5', to_python_float(prec5))
logger.log_metric('train.loss', to_python_float(loss))
logger.log_metric('train.compute_ips', calc_ips(bs, it_time - data_time))
logger.log_metric('train.total_ips', calc_ips(bs, it_time))
logger.log_metric('train.data_time', data_time)
logger.log_metric('train.compute_time', it_time - data_time)
end = time.time()
|
actnn
|
positive
|
def all(self):
""" Return everything, without restrictions """
<DeepExtract>
if not isinstance(self.q, (type(None), Q)):
raise ValueError("self.q value '%s' must be None or a Q instance" % self.q)
if not isinstance(self.only_fields, (type(None), tuple)):
raise ValueError("self.only_fields value '%s' must be None or a tuple" % self.only_fields)
if not isinstance(self.order_fields, (type(None), tuple)):
raise ValueError("self.order_fields value '%s' must be None or a tuple" % self.order_fields)
if self.return_format not in self.RETURN_TYPES:
raise ValueError("self.return_value '%s' must be one of %s" % (self.return_format, self.RETURN_TYPES))
new_qs = self.__class__(self.folder_collection, request_type=self.request_type)
new_qs.q = None if self.q is None else deepcopy(self.q)
new_qs.only_fields = self.only_fields
new_qs.order_fields = None if self.order_fields is None else deepcopy(self.order_fields)
new_qs.return_format = self.return_format
new_qs.calendar_view = self.calendar_view
new_qs.page_size = self.page_size
new_qs.max_items = self.max_items
new_qs._depth = self._depth
new_qs = new_qs
</DeepExtract>
return new_qs
|
def all(self):
""" Return everything, without restrictions """
if not isinstance(self.q, (type(None), Q)):
raise ValueError("self.q value '%s' must be None or a Q instance" % self.q)
if not isinstance(self.only_fields, (type(None), tuple)):
raise ValueError("self.only_fields value '%s' must be None or a tuple" % self.only_fields)
if not isinstance(self.order_fields, (type(None), tuple)):
raise ValueError("self.order_fields value '%s' must be None or a tuple" % self.order_fields)
if self.return_format not in self.RETURN_TYPES:
raise ValueError("self.return_value '%s' must be one of %s" % (self.return_format, self.RETURN_TYPES))
new_qs = self.__class__(self.folder_collection, request_type=self.request_type)
new_qs.q = None if self.q is None else deepcopy(self.q)
new_qs.only_fields = self.only_fields
new_qs.order_fields = None if self.order_fields is None else deepcopy(self.order_fields)
new_qs.return_format = self.return_format
new_qs.calendar_view = self.calendar_view
new_qs.page_size = self.page_size
new_qs.max_items = self.max_items
new_qs._depth = self._depth
new_qs = new_qs
return new_qs
|
exchangelib
|
positive
|
@hook.command('note', 'notes', 'todo')
def note(text, conn, nick, db, notice):
"""<add|list|get|del|clear> args - manipulates your list of notes"""
parts = text.split()
if len(parts) == 1 and text.isdigit():
cmd = 'get'
args = parts
else:
cmd = parts[0].lower()
args = parts[1:]
if cmd in ['add', 'new']:
if not len(args):
return 'No text provided!'
note_text = ' '.join(args)
<DeepExtract>
id_query = select([sqlalchemy.sql.expression.func.max(table.c.note_id).label('maxid')]).where(table.c.user == nick.lower())
max_id = db.execute(id_query).scalar()
if max_id is None:
note_id = 1
else:
note_id = max_id + 1
query = table.insert().values(note_id=note_id, connection=conn.name, user=nick.lower(), text=note_text, deleted=False, added=datetime.today())
db.execute(query)
db.commit()
</DeepExtract>
notice('Note added!')
return
elif cmd in ['del', 'delete', 'remove']:
if not len(args):
return 'No note ID provided!'
note_id = args[0]
<DeepExtract>
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
</DeepExtract>
if not n:
notice('#{} is not a valid note ID.'.format(note_id))
return
<DeepExtract>
query = table.update().where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id).values(deleted=1)
db.execute(query)
db.commit()
</DeepExtract>
notice('Note #{} deleted!'.format(note_id))
return
elif cmd == 'clear':
<DeepExtract>
query = table.update().where(table.c.connection == conn.name).where(table.c.user == nick.lower()).values(deleted=1)
db.execute(query)
db.commit()
</DeepExtract>
notice('All notes deleted!')
return
elif cmd == 'get':
if not len(args):
return 'No note ID provided!'
note_id = args[0]
<DeepExtract>
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
</DeepExtract>
if not n:
notice('{} is not a valid note ID.'.format(nick))
return
<DeepExtract>
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
</DeepExtract>
notice(text)
return
elif cmd in ['share', 'show']:
if not len(args):
return 'No note ID provided!'
note_id = args[0]
<DeepExtract>
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
</DeepExtract>
if not n:
notice('{} is not a valid note ID.'.format(nick))
return
<DeepExtract>
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
</DeepExtract>
return text
elif cmd == 'list':
<DeepExtract>
if show_deleted:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).order_by(table.c.added)
else:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.deleted == 0).order_by(table.c.added)
notes = db.execute(query).fetchall()
</DeepExtract>
if not notes:
notice('You have no notes.'.format(nick))
return
notice('All notes for {}:'.format(nick))
for n in notes:
<DeepExtract>
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
</DeepExtract>
notice(text)
elif cmd == 'listall':
<DeepExtract>
if True:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).order_by(table.c.added)
else:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.deleted == 0).order_by(table.c.added)
notes = db.execute(query).fetchall()
</DeepExtract>
if not notes:
notice('You have no notes.'.format(nick))
return
notice('All notes for {}:'.format(nick))
for n in notes:
<DeepExtract>
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
</DeepExtract>
notice(text)
else:
notice('Unknown command: {}'.format(cmd))
|
@hook.command('note', 'notes', 'todo')
def note(text, conn, nick, db, notice):
"""<add|list|get|del|clear> args - manipulates your list of notes"""
parts = text.split()
if len(parts) == 1 and text.isdigit():
cmd = 'get'
args = parts
else:
cmd = parts[0].lower()
args = parts[1:]
if cmd in ['add', 'new']:
if not len(args):
return 'No text provided!'
note_text = ' '.join(args)
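# Allocate the next note_id for this user (max existing id + 1) and insert the new note row.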
id_query = select([sqlalchemy.sql.expression.func.max(table.c.note_id).label('maxid')]).where(table.c.user == nick.lower())
max_id = db.execute(id_query).scalar()
if max_id is None:
note_id = 1
else:
note_id = max_id + 1
query = table.insert().values(note_id=note_id, connection=conn.name, user=nick.lower(), text=note_text, deleted=False, added=datetime.today())
db.execute(query)
db.commit()
notice('Note added!')
return
elif cmd in ['del', 'delete', 'remove']:
if not len(args):
return 'No note ID provided!'
note_id = args[0]
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
if not n:
notice('#{} is not a valid note ID.'.format(note_id))
return
query = table.update().where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id).values(deleted=1)
db.execute(query)
db.commit()
notice('Note #{} deleted!'.format(note_id))
return
elif cmd == 'clear':
query = table.update().where(table.c.connection == conn.name).where(table.c.user == nick.lower()).values(deleted=1)
db.execute(query)
db.commit()
notice('All notes deleted!')
return
elif cmd == 'get':
if not len(args):
return 'No note ID provided!'
note_id = args[0]
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
if not n:
notice('{} is not a valid note ID.'.format(nick))
return
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
notice(text)
return
elif cmd in ['share', 'show']:
if not len(args):
return 'No note ID provided!'
note_id = args[0]
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.note_id == note_id)
n = db.execute(query).fetchone()
if not n:
notice('{} is not a valid note ID.'.format(nick))
return
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
return text
elif cmd == 'list':
if show_deleted:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).order_by(table.c.added)
else:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.deleted == 0).order_by(table.c.added)
notes = db.execute(query).fetchall()
if not notes:
notice('You have no notes.'.format(nick))
return
notice('All notes for {}:'.format(nick))
for n in notes:
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
notice(text)
elif cmd == 'listall':
if True:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).order_by(table.c.added)
else:
query = select([table.c.note_id, table.c.text, table.c.added]).where(table.c.connection == conn.name).where(table.c.user == nick.lower()).where(table.c.deleted == 0).order_by(table.c.added)
notes = db.execute(query).fetchall()
if not notes:
notice('You have no notes.'.format(nick))
return
notice('All notes for {}:'.format(nick))
for n in notes:
(note_id, note_text, added) = n
added_string = added.strftime('%d %b, %Y')
text = '\x02Note #{}:\x02 {} - \x02{}\x02'.format(note_id, note_text, added_string)
notice(text)
else:
notice('Unknown command: {}'.format(cmd))
|
CloudBot
|
positive
|
def handlerAccess_List(parameters):
CLI['scopeLevel'] = 'access_List'
splitValue = str(parameters['value']).split(' ')
if len(splitValue) >= 4:
aclName = splitValue[0]
lineType = splitValue[1]
CLI['scopeItem0'] = aclName
if not aclName in ASA_CFG['acls']:
ASA_CFG['acls'][aclName] = {'inUse': False, 'rules': []}
if lineType == 'extended':
<DeepExtract>
strAclLine = str(parameters['value'])
words = strAclLine.split(' ')
if words[1] == 'extended':
action = words[2]
protocol = words[3]
(items, remainingData) = extractAclScopeItems(words[4:])
if not items is None:
result = {'action': action, 'protocol': protocol, 'port': 'any', 'source': items[0], 'destination': items[1]}
if len(remainingData) > 0:
if remainingData[0] == 'eq':
result['port'] = {'eq': resolvePortNumber(remainingData[1])}
elif remainingData[0] == 'object-group':
result['port'] = {'objectGroup': remainingData[1]}
else:
line = None
line = result
line = None
</DeepExtract>
if not line is None:
ASA_CFG['acls'][aclName]['rules'].append(line)
else:
print('Parser error: Unknown data in ACL line:\n%s' % parameters['value'])
elif lineType == 'standard':
<DeepExtract>
strAclLine = str(parameters['value'])
words = strAclLine.split(' ')
if words[1] == 'standard':
action = words[2]
try:
hostIp = ipaddress.IPv4Network('%s/%s' % (words[3], words[4]))
except:
line = None
result = {'action': action, 'protocol': 'any', 'port': 'any', 'source': 'any', 'destination': {'type': 'static', 'value': hostIp}}
line = result
line = None
</DeepExtract>
if not line is None:
ASA_CFG['acls'][aclName]['rules'].append(line)
else:
print('Parser error: Unknown data in ACL line:\n%s' % parameters['value'])
|
def handlerAccess_List(parameters):
CLI['scopeLevel'] = 'access_List'
splitValue = str(parameters['value']).split(' ')
if len(splitValue) >= 4:
aclName = splitValue[0]
lineType = splitValue[1]
CLI['scopeItem0'] = aclName
if not aclName in ASA_CFG['acls']:
ASA_CFG['acls'][aclName] = {'inUse': False, 'rules': []}
if lineType == 'extended':
strAclLine = str(parameters['value'])
words = strAclLine.split(' ')
if words[1] == 'extended':
action = words[2]
protocol = words[3]
(items, remainingData) = extractAclScopeItems(words[4:])
if not items is None:
result = {'action': action, 'protocol': protocol, 'port': 'any', 'source': items[0], 'destination': items[1]}
if len(remainingData) > 0:
if remainingData[0] == 'eq':
result['port'] = {'eq': resolvePortNumber(remainingData[1])}
elif remainingData[0] == 'object-group':
result['port'] = {'objectGroup': remainingData[1]}
else:
line = None
line = result
line = None
if not line is None:
ASA_CFG['acls'][aclName]['rules'].append(line)
else:
print('Parser error: Unknown data in ACL line:\n%s' % parameters['value'])
elif lineType == 'standard':
strAclLine = str(parameters['value'])
words = strAclLine.split(' ')
if words[1] == 'standard':
action = words[2]
try:
hostIp = ipaddress.IPv4Network('%s/%s' % (words[3], words[4]))
except:
line = None
result = {'action': action, 'protocol': 'any', 'port': 'any', 'source': 'any', 'destination': {'type': 'static', 'value': hostIp}}
line = result
line = None
if not line is None:
ASA_CFG['acls'][aclName]['rules'].append(line)
else:
print('Parser error: Unknown data in ACL line:\n%s' % parameters['value'])
|
automation-scripts
|
positive
|
def main():
d = {'foo': 'bar', 'baz': None}
<DeepExtract>
d = {k: v for (k, v) in d.items() if v is not None}
</DeepExtract>
print(d)
|
def main():
d = {'foo': 'bar', 'baz': None}
d = {k: v for (k, v) in d.items() if v is not None}
print(d)
|
code-snippets-python
|
positive
|
def _UnaryOp(opcode, stack, err_raiser):
if len(stack) < 1:
<DeepExtract>
raise MissingOpArgumentsError(*args, sop=sop, sop_data=sop_data, sop_pc=sop_pc, stack=stack, scriptIn=scriptIn, txTo=txTo, inIdx=inIdx, flags=flags, altstack=altstack, vfExec=vfExec, pbegincodehash=pbegincodehash, nOpCount=nOpCount[0])
</DeepExtract>
<DeepExtract>
v = bitcoin.core._bignum.vch2bn(stack[-1])
if len(stack[-1]) > MAX_NUM_SIZE:
raise err_raiser(EvalScriptError, 'CastToBigNum() : overflow')
bn = v
</DeepExtract>
stack.pop()
if opcode == OP_1ADD:
bn += 1
elif opcode == OP_1SUB:
bn -= 1
elif opcode == OP_NEGATE:
bn = -bn
elif opcode == OP_ABS:
if bn < 0:
bn = -bn
elif opcode == OP_NOT:
bn = long(bn == 0)
elif opcode == OP_0NOTEQUAL:
bn = long(bn != 0)
else:
raise AssertionError('Unknown unary opcode encountered; this should not happen')
stack.append(bitcoin.core._bignum.bn2vch(bn))
|
def _UnaryOp(opcode, stack, err_raiser):
if len(stack) < 1:
raise MissingOpArgumentsError(*args, sop=sop, sop_data=sop_data, sop_pc=sop_pc, stack=stack, scriptIn=scriptIn, txTo=txTo, inIdx=inIdx, flags=flags, altstack=altstack, vfExec=vfExec, pbegincodehash=pbegincodehash, nOpCount=nOpCount[0])
v = bitcoin.core._bignum.vch2bn(stack[-1])
if len(stack[-1]) > MAX_NUM_SIZE:
raise err_raiser(EvalScriptError, 'CastToBigNum() : overflow')
bn = v
stack.pop()
if opcode == OP_1ADD:
bn += 1
elif opcode == OP_1SUB:
bn -= 1
elif opcode == OP_NEGATE:
bn = -bn
elif opcode == OP_ABS:
if bn < 0:
bn = -bn
elif opcode == OP_NOT:
bn = long(bn == 0)
elif opcode == OP_0NOTEQUAL:
bn = long(bn != 0)
else:
raise AssertionError('Unknown unary opcode encountered; this should not happen')
stack.append(bitcoin.core._bignum.bn2vch(bn))
|
checklocktimeverify-demos
|
positive
|
def replace_nan_np(p):
if np.any(np.isnan(p)):
p[np.isnan(p)] = 1.0
p[np.isinf(p)] = 1.0
<DeepExtract>
p = p / np.sum(p, axis=axis, keepdims=True)
</DeepExtract>
return p
|
def replace_nan_np(p):
if np.any(np.isnan(p)):
p[np.isnan(p)] = 1.0
p[np.isinf(p)] = 1.0
p = p / np.sum(p, axis=axis, keepdims=True)
return p
|
cs-ranking
|
positive
|
def on_image_constructed(self, name, prediction, prefix=''):
<DeepExtract>
if prediction is None:
prediction = None
prediction = prediction if not self.border else prediction[self.border:-self.border, self.border:-self.border, ...]
</DeepExtract>
prediction = np.squeeze(prediction)
<DeepExtract>
raise NotImplementedError
</DeepExtract>
|
def on_image_constructed(self, name, prediction, prefix=''):
if prediction is None:
prediction = None
prediction = prediction if not self.border else prediction[self.border:-self.border, self.border:-self.border, ...]
prediction = np.squeeze(prediction)
raise NotImplementedError
|
dsb2018_topcoders
|
positive
|
def _pop_complex_predicates(args):
"""
Compute the cartesian product of "accept" and "content_type"
fields to establish all possible predicate combinations.
.. seealso::
https://github.com/mozilla-services/cornice/pull/91#discussion_r3441384
"""
<DeepExtract>
values = to_list(args.pop('accept', ()))
values = list(map(lambda value: {'kind': 'accept', 'value': value}, values))
accept_list = values
</DeepExtract>
<DeepExtract>
values = to_list(args.pop('content_type', ()))
values = list(map(lambda value: {'kind': 'content_type', 'value': value}, values))
content_type_list = values
</DeepExtract>
product_input = filter(None, [accept_list, content_type_list])
predicate_product = list(filter(None, itertools.product(*product_input)))
return predicate_product
|
def _pop_complex_predicates(args):
"""
Compute the cartesian product of "accept" and "content_type"
fields to establish all possible predicate combinations.
.. seealso::
https://github.com/mozilla-services/cornice/pull/91#discussion_r3441384
"""
values = to_list(args.pop('accept', ()))
values = list(map(lambda value: {'kind': 'accept', 'value': value}, values))
accept_list = values
values = to_list(args.pop('content_type', ()))
values = list(map(lambda value: {'kind': 'content_type', 'value': value}, values))
content_type_list = values
product_input = filter(None, [accept_list, content_type_list])
predicate_product = list(filter(None, itertools.product(*product_input)))
return predicate_product
|
cornice
|
positive
|
def lineratio_index(self, indicator='NII'):
"""Return the line ratio index for the given galaxy.
This is the index used in Vanderplas et al 2009, and makes use
of line-ratio fits from Kewley et al 2001
Parameters
----------
indicator: string ['NII'|'OI'|'SII']
The emission line to use as an indicator
Returns
-------
cln: integer
The classification of the spectrum based on SDSS pipeline and
the line ratios.
0 : unknown (SPEC_CLN = 0)
1 : star (SPEC_CLN = 1)
2 : absorption galaxy (H-alpha seen in absorption)
3 : normal galaxy (no significant H-alpha emission or absorption)
4 : emission line galaxies (below line-ratio curve)
5 : narrow-line QSO (above line-ratio curve)
6 : broad-line QSO (SPEC_CLN = 3)
7 : Sky (SPEC_CLN = 4)
8 : Hi-z QSO (SPEC_CLN = 5)
9 : Late-type star (SPEC_CLN = 6)
10 : Emission galaxy (SPEC_CLN = 7)
ratios: tuple
The line ratios used to compute this
"""
assert indicator in ['NII', 'OI', 'SII']
if self.spec_cln < 2:
return (self.spec_cln, (0, 0))
elif self.spec_cln > 2:
return (self.spec_cln + 3, (0, 0))
<DeepExtract>
lam = LINES.get('Ha')
if lam is None:
lam1 = LINES.get('Ha' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('Ha' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_Ha, nsig_Ha) = (strength, nsig)
</DeepExtract>
<DeepExtract>
lam = LINES.get('Hb')
if lam is None:
lam1 = LINES.get('Hb' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('Hb' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_Hb, nsig_Hb) = (strength, nsig)
</DeepExtract>
if nsig_Ha < 3 or nsig_Hb < 3:
return (3, (0, 0))
if strength_Ha < 0 or strength_Hb < 0:
return (2, (0, 0))
<DeepExtract>
lam = LINES.get(indicator)
if lam is None:
lam1 = LINES.get(indicator + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get(indicator + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_I, nsig_I) = (strength, nsig)
</DeepExtract>
<DeepExtract>
lam = LINES.get('OIII')
if lam is None:
lam1 = LINES.get('OIII' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('OIII' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_OIII, nsig_OIII) = (strength, nsig)
</DeepExtract>
log_OIII_Hb = np.log10(strength_OIII / strength_Hb)
I_Ha = np.log10(strength_I / strength_Ha)
if indicator == 'NII':
if I_Ha >= 0.47 or log_OIII_Hb >= log_OIII_Hb_NII(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
elif indicator == 'OI':
if I_Ha >= -0.59 or log_OIII_Hb >= log_OIII_Hb_OI(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
elif I_Ha >= 0.32 or log_OIII_Hb >= log_OIII_Hb_SII(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
|
def lineratio_index(self, indicator='NII'):
"""Return the line ratio index for the given galaxy.
This is the index used in Vanderplas et al 2009, and makes use
of line-ratio fits from Kewley et al 2001
Parameters
----------
indicator: string ['NII'|'OI'|'SII']
The emission line to use as an indicator
Returns
-------
cln: integer
The classification of the spectrum based on SDSS pipeline and
the line ratios.
0 : unknown (SPEC_CLN = 0)
1 : star (SPEC_CLN = 1)
2 : absorption galaxy (H-alpha seen in absorption)
3 : normal galaxy (no significant H-alpha emission or absorption)
4 : emission line galaxies (below line-ratio curve)
5 : narrow-line QSO (above line-ratio curve)
6 : broad-line QSO (SPEC_CLN = 3)
7 : Sky (SPEC_CLN = 4)
8 : Hi-z QSO (SPEC_CLN = 5)
9 : Late-type star (SPEC_CLN = 6)
10 : Emission galaxy (SPEC_CLN = 7)
ratios: tuple
The line ratios used to compute this
"""
assert indicator in ['NII', 'OI', 'SII']
if self.spec_cln < 2:
return (self.spec_cln, (0, 0))
elif self.spec_cln > 2:
return (self.spec_cln + 3, (0, 0))
lam = LINES.get('Ha')
if lam is None:
lam1 = LINES.get('Ha' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('Ha' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_Ha, nsig_Ha) = (strength, nsig)
lam = LINES.get('Hb')
if lam is None:
lam1 = LINES.get('Hb' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('Hb' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_Hb, nsig_Hb) = (strength, nsig)
if nsig_Ha < 3 or nsig_Hb < 3:
return (3, (0, 0))
if strength_Ha < 0 or strength_Hb < 0:
return (2, (0, 0))
lam = LINES.get(indicator)
if lam is None:
lam1 = LINES.get(indicator + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get(indicator + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_I, nsig_I) = (strength, nsig)
lam = LINES.get('OIII')
if lam is None:
lam1 = LINES.get('OIII' + 'a')
ind1 = np.where(abs(self.hdulist[2].data['restWave'] - lam1) < 1)[0]
lam2 = LINES.get('OIII' + 'b')
ind2 = np.where(abs(self.hdulist[2].data['restWave'] - lam2) < 1)[0]
if len(ind1) == 0:
s1 = h1 = 0
nsig1 = 0
else:
s1 = self.hdulist[2].data['sigma'][ind1]
h1 = self.hdulist[2].data['height'][ind1]
nsig1 = self.hdulist[2].data['nsigma'][ind1]
if len(ind2) == 0:
s2 = h2 = 0
nsig2 = 0
else:
s2 = self.hdulist[2].data['sigma'][ind2]
h2 = self.hdulist[2].data['height'][ind2]
nsig2 = self.hdulist[2].data['nsigma'][ind2]
strength = s1 * h1 + s2 * h2
nsig = max(nsig1, nsig2)
else:
ind = np.where(abs(self.hdulist[2].data['restWave'] - lam) < 1)[0]
if len(ind) == 0:
strength = 0
nsig = 0
else:
s = self.hdulist[2].data['sigma'][ind]
h = self.hdulist[2].data['height'][ind]
nsig = self.hdulist[2].data['nsigma'][ind]
strength = s * h
(strength_OIII, nsig_OIII) = (strength, nsig)
log_OIII_Hb = np.log10(strength_OIII / strength_Hb)
I_Ha = np.log10(strength_I / strength_Ha)
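# Compare against the Kewley et al 2001 line-ratio curve for the chosen indicator: above the curve -> narrow-line QSO (5), below -> emission-line galaxy (4).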
if indicator == 'NII':
if I_Ha >= 0.47 or log_OIII_Hb >= log_OIII_Hb_NII(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
elif indicator == 'OI':
if I_Ha >= -0.59 or log_OIII_Hb >= log_OIII_Hb_OI(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
elif I_Ha >= 0.32 or log_OIII_Hb >= log_OIII_Hb_SII(I_Ha):
return (5, (I_Ha, log_OIII_Hb))
else:
return (4, (I_Ha, log_OIII_Hb))
|
astroML
|
positive
|
def max_split_betweenness(G, dic):
"""
Given a dictionary of vertices and their pair betweenness scores, uses the greedy
algorithm discussed in the CONGA paper to find a (hopefully) near-optimal split.
Returns a 3-tuple (vMax, vNum, vSpl) where vMax is the max split betweenness,
vNum is the vertex with said split betweenness, and vSpl is a list of which
vertices are on each side of the optimal split.
"""
vMax = 0
for v in dic:
<DeepExtract>
neighbors = G.neighbors(v)
mapping = {neigh: i for (i, neigh) in enumerate(neighbors)}
n = len(neighbors)
clique = np.zeros((n, n))
for (uw, score) in dic[v].items():
clique[mapping[uw[0]], mapping[uw[1]]] = score
clique[mapping[uw[1]], mapping[uw[0]]] = score
np.fill_diagonal(clique, 0)
clique = clique
</DeepExtract>
vMap = [[ve] for ve in G.neighbors(v)]
while clique.size > 4:
<DeepExtract>
(i, j) = mat_min(clique)
clique[i, :] = clique[j, :] + clique[i, :]
clique = np.delete(clique, j, axis=0)
clique[:, i] = clique[:, j] + clique[:, i]
clique = np.delete(clique, j, axis=1)
np.fill_diagonal(clique, 0)
(i, j, clique) = (i, j, clique)
</DeepExtract>
vMap[i] += vMap.pop(j)
if clique[0, 1] >= vMax:
vMax = clique[0, 1]
vNum = v
vSpl = vMap
return (vMax, vNum, vSpl)
|
def max_split_betweenness(G, dic):
"""
Given a dictionary of vertices and their pair betweenness scores, uses the greedy
algorithm discussed in the CONGA paper to find a (hopefully) near-optimal split.
Returns a 3-tuple (vMax, vNum, vSpl) where vMax is the max split betweenness,
vNum is the vertex with said split betweenness, and vSpl is a list of which
vertices are on each side of the optimal split.
"""
vMax = 0
for v in dic:
neighbors = G.neighbors(v)
mapping = {neigh: i for (i, neigh) in enumerate(neighbors)}
n = len(neighbors)
clique = np.zeros((n, n))
for (uw, score) in dic[v].items():
clique[mapping[uw[0]], mapping[uw[1]]] = score
clique[mapping[uw[1]], mapping[uw[0]]] = score
np.fill_diagonal(clique, 0)
clique = clique
vMap = [[ve] for ve in G.neighbors(v)]
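# Greedily merge the two closest neighbor groups until the matrix is 2x2; the surviving off-diagonal entry is this vertex's split betweenness.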
while clique.size > 4:
(i, j) = mat_min(clique)
clique[i, :] = clique[j, :] + clique[i, :]
clique = np.delete(clique, j, axis=0)
clique[:, i] = clique[:, j] + clique[:, i]
clique = np.delete(clique, j, axis=1)
np.fill_diagonal(clique, 0)
(i, j, clique) = (i, j, clique)
vMap[i] += vMap.pop(j)
if clique[0, 1] >= vMax:
vMax = clique[0, 1]
vNum = v
vSpl = vMap
return (vMax, vNum, vSpl)
|
cdlib
|
positive
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | (d_a >> 16) * (d_b & 65535) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | (d_a & 65535) * (d_b >> 16) << n.value & (sc0 ^ 4294967295)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
result_w0 = e_d_0 - mul_res0
result_w1 = e_d_1 - mul_res1
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
self.put(result_w0_ssov, 'd{0}'.format(self.data['c']))
self.put(result_w1_ssov, 'd{0}'.format(self.data['c'] + 1))
c = 0
ov_w0 = overflow(result_w0_ssov)
ov_w1 = overflow(result_w1_ssov)
v = ov_w1 | ov_w0
aov_w0 = advanced_overflow(result_w0_ssov)
aov_w1 = advanced_overflow(result_w1_ssov)
av = aov_w1 | aov_w0
<DeepExtract>
psw = self.get('psw', Type.int_32)
</DeepExtract>
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
|
def compute_result(self, *args):
d_a = args[0]
d_b = args[1]
n = args[2]
sc1 = extend_to_32_bits((d_a >> 16 == 32768) & (d_b & 65535 == 32768) & (n == 1).cast_to(Type.int_32))
sc0 = extend_to_32_bits((d_a & 65535 == 32768) & (d_b >> 16 == 32768) & (n == 1).cast_to(Type.int_32))
mul_res1 = 2147483647 & sc1 | (d_a >> 16) * (d_b & 65535) << n.value & (sc1 ^ 4294967295)
mul_res0 = 2147483647 & sc0 | (d_a & 65535) * (d_b >> 16) << n.value & (sc0 ^ 4294967295)
e_d_0 = self.get('d{0}'.format(self.data['d']), Type.int_32)
e_d_1 = self.get('d{0}'.format(self.data['d'] + 1), Type.int_32)
result_w0 = e_d_0 - mul_res0
result_w1 = e_d_1 - mul_res1
max_pos = self.constant(INT32_MAX_POS, Type.int_32)
max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
self.put(result_w0_ssov, 'd{0}'.format(self.data['c']))
self.put(result_w1_ssov, 'd{0}'.format(self.data['c'] + 1))
c = 0
ov_w0 = overflow(result_w0_ssov)
ov_w1 = overflow(result_w1_ssov)
v = ov_w1 | ov_w0
aov_w0 = advanced_overflow(result_w0_ssov)
aov_w1 = advanced_overflow(result_w1_ssov)
av = aov_w1 | aov_w0
psw = self.get('psw', Type.int_32)
cond_sv = v == 0
cond_sav = av == 0
sv = psw & SV_MASK & cond_sv | 1 & (cond_sv ^ 1)
sav = psw & ASV_MASK & cond_sav | 1 & (cond_sav ^ 1)
psw = set_usb(psw, c, v, sv, av, sav)
self.put(psw, 'psw')
|
angr-platforms
|
positive
|
def get_cmdclass():
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
cmds = {}
from distutils.core import Command
class cmd_version(Command):
description = 'report generated version string'
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
True = True or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, True)
if True:
print('got version from expanded keyword %s' % ver)
vers = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if True:
print('got version from file %s %s' % (versionfile_abs, ver))
vers = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, True)
ver = render(pieces, cfg.style)
if True:
print('got version from VCS %s' % ver)
vers = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, True)
if True:
print('got version from parentdir %s' % ver)
vers = ver
except NotThisMethod:
pass
if True:
print('unable to compute version')
vers = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
</DeepExtract>
print('Version: %s' % vers['version'])
print(' full-revisionid: %s' % vers.get('full-revisionid'))
print(' dirty: %s' % vers.get('dirty'))
if vers['error']:
print(' error: %s' % vers['error'])
cmds['version'] = cmd_version
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
</DeepExtract>
_build_py.run(self)
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
cmds['build_py'] = cmd_build_py
if 'cx_Freeze' in sys.modules:
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
</DeepExtract>
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
cmds['build_exe'] = cmd_build_exe
del cmds['build_py']
if 'setuptools' in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
</DeepExtract>
self._versioneer_generated_versions = versions
self.distribution.metadata.version = versions['version']
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
_sdist.make_release_tree(self, base_dir, files)
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(self._versioneer_generated_versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, self._versioneer_generated_versions['version']))
</DeepExtract>
cmds['sdist'] = cmd_sdist
return cmds
|
def get_cmdclass():
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
cmds = {}
from distutils.core import Command
class cmd_version(Command):
description = 'report generated version string'
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
True = True or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, True)
if True:
print('got version from expanded keyword %s' % ver)
vers = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if True:
print('got version from file %s %s' % (versionfile_abs, ver))
vers = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, True)
ver = render(pieces, cfg.style)
if True:
print('got version from VCS %s' % ver)
vers = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, True)
if True:
print('got version from parentdir %s' % ver)
vers = ver
except NotThisMethod:
pass
if True:
print('unable to compute version')
vers = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
print('Version: %s' % vers['version'])
print(' full-revisionid: %s' % vers.get('full-revisionid'))
print(' dirty: %s' % vers.get('dirty'))
if vers['error']:
print(' error: %s' % vers['error'])
cmds['version'] = cmd_version
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
_build_py.run(self)
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
cmds['build_py'] = cmd_build_py
if 'cx_Freeze' in sys.modules:
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
cmds['build_exe'] = cmd_build_exe
del cmds['build_py']
if 'setuptools' in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version'}
self._versioneer_generated_versions = versions
self.distribution.metadata.version = versions['version']
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
_sdist.make_release_tree(self, base_dir, files)
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(self._versioneer_generated_versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, self._versioneer_generated_versions['version']))
cmds['sdist'] = cmd_sdist
return cmds
|
bhmm
|
positive
|
def test_fail_on_first() -> None:
<DeepExtract>
(work_done, ex) = check_executor_work([('same_key', i) for i in range(10)], 1, 1, True)
</DeepExtract>
assert work_done == [0, 1, 2, 3, 4]
assert ex is not None
assert ex.args[0] == 'Abort 5'
|
def test_fail_on_first() -> None:
(work_done, ex) = check_executor_work([('same_key', i) for i in range(10)], 1, 1, True)
assert work_done == [0, 1, 2, 3, 4]
assert ex is not None
assert ex.args[0] == 'Abort 5'
|
cloudkeeper
|
positive
|
def read_item_id():
<DeepExtract>
crawled_id_filename = os.path.join(PATH, 'log', crawled_id_in_log)
if not os.path.isfile(crawled_id_filename):
open(crawled_id_filename, mode='w')
with codecs.open(crawled_id_filename, encoding='utf-8') as f:
try:
last_crawled_id_str = f.readlines()[-1]
last_crawled_id = pattern.search(last_crawled_id_str).group()
except:
last_crawled_id = '000000'
items_id_filename = os.path.join(PATH, 'sys', '%s_item_id' % SPIDER_NAME)
with codecs.open(items_id_filename, encoding='utf-8') as idf:
whole_item_id_list = [item.strip() for item in idf.readlines()]
try:
break_point_index = whole_item_id_list.index(last_crawled_id)
except:
break_point_index = 0
whole_item_id_list = whole_item_id_list[break_point_index:]
</DeepExtract>
failed_id_filename = os.path.join(PATH, 'log', failed_id_in_log)
crawled_id_filename = os.path.join(PATH, 'log', crawled_id_in_log)
http_hanlder = urllib2.ProxyHandler({'http': 'http://%s' % ip_port})
opener = urllib2.build_opener(http_hanlder)
urllib2.install_opener(opener)
with codecs.open(failed_id_filename, mode='a', encoding='utf-8') as failed_id_wf, codecs.open(crawled_id_filename, mode='a', encoding='utf-8') as crawled_id_wf:
for item_id in whole_item_id_list:
item_msg_list = []
url = root_url_pattern % item_id
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
time.sleep(30)
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
time.sleep(30)
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
failed_id_wf.write('request timed item_id in url;%s\n' % url)
continue
try:
soup = BeautifulSoup(html, 'html5lib')
name_str = soup.find('div', id='name')
except:
continue
if not name_str:
failed_id_wf.write('not match name_str in url;%s\n' % url)
continue
try:
item_name = name_str.text.strip()
item_msg_list.append(item_name + '\n')
except:
pass
detail_info = soup.find('div', id='product-detail-1')
item_info = ''
if not detail_info:
failed_id_wf.write('not match detail-list-1 in url;%s\n' % url)
else:
try:
item_info = detail_info.text.strip()
except:
<DeepExtract>
timestamp = time.strftime('%Y_%m_%d_%H_{}'.format(SPIDER_NAME))
filename = os.path.join(PATH, 'out', timestamp)
with codecs.open(filename, 'a', encoding='utf-8') as wf:
wf.writelines(item_msg_list)
</DeepExtract>
crawled_id_wf.write(url + '\n')
continue
item_msg_list.append(item_info + '\n')
<DeepExtract>
timestamp = time.strftime('%Y_%m_%d_%H_{}'.format(SPIDER_NAME))
filename = os.path.join(PATH, 'out', timestamp)
with codecs.open(filename, 'a', encoding='utf-8') as wf:
wf.writelines(item_msg_list)
</DeepExtract>
crawled_id_wf.write(url + '\n')
|
def read_item_id():
crawled_id_filename = os.path.join(PATH, 'log', crawled_id_in_log)
if not os.path.isfile(crawled_id_filename):
open(crawled_id_filename, mode='w')
with codecs.open(crawled_id_filename, encoding='utf-8') as f:
try:
last_crawled_id_str = f.readlines()[-1]
last_crawled_id = pattern.search(last_crawled_id_str).group()
except:
last_crawled_id = '000000'
items_id_filename = os.path.join(PATH, 'sys', '%s_item_id' % SPIDER_NAME)
with codecs.open(items_id_filename, encoding='utf-8') as idf:
whole_item_id_list = [item.strip() for item in idf.readlines()]
try:
break_point_index = whole_item_id_list.index(last_crawled_id)
except:
break_point_index = 0
whole_item_id_list = whole_item_id_list[break_point_index:]
failed_id_filename = os.path.join(PATH, 'log', failed_id_in_log)
crawled_id_filename = os.path.join(PATH, 'log', crawled_id_in_log)
http_hanlder = urllib2.ProxyHandler({'http': 'http://%s' % ip_port})
opener = urllib2.build_opener(http_hanlder)
urllib2.install_opener(opener)
with codecs.open(failed_id_filename, mode='a', encoding='utf-8') as failed_id_wf, codecs.open(crawled_id_filename, mode='a', encoding='utf-8') as crawled_id_wf:
for item_id in whole_item_id_list:
item_msg_list = []
url = root_url_pattern % item_id
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
time.sleep(30)
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
time.sleep(30)
try:
html = urllib2.urlopen(url, timeout=15).read()
except BaseException:
failed_id_wf.write('request timed item_id in url;%s\n' % url)
continue
try:
soup = BeautifulSoup(html, 'html5lib')
name_str = soup.find('div', id='name')
except:
continue
if not name_str:
failed_id_wf.write('not match name_str in url;%s\n' % url)
continue
try:
item_name = name_str.text.strip()
item_msg_list.append(item_name + '\n')
except:
pass
detail_info = soup.find('div', id='product-detail-1')
item_info = ''
if not detail_info:
failed_id_wf.write('not match detail-list-1 in url;%s\n' % url)
else:
try:
item_info = detail_info.text.strip()
except:
timestamp = time.strftime('%Y_%m_%d_%H_{}'.format(SPIDER_NAME))
filename = os.path.join(PATH, 'out', timestamp)
with codecs.open(filename, 'a', encoding='utf-8') as wf:
wf.writelines(item_msg_list)
crawled_id_wf.write(url + '\n')
continue
item_msg_list.append(item_info + '\n')
timestamp = time.strftime('%Y_%m_%d_%H_{}'.format(SPIDER_NAME))
filename = os.path.join(PATH, 'out', timestamp)
with codecs.open(filename, 'a', encoding='utf-8') as wf:
wf.writelines(item_msg_list)
crawled_id_wf.write(url + '\n')
|
e-business
|
positive
|
@unittest.skipIf(DJANGO_VERSION >= (3, 0) and 'mysql' in os.environ.get('DATABASE_URL', ''), 'Negative ids no longer work in Django 3.0+ with MySQL.')
def test_view_manage_negative_group_form(self):
<DeepExtract>
self.client.login(username='admin', password='admin')
</DeepExtract>
url = reverse('admin:%s_%s_permissions' % self.obj_info, args=[self.obj.pk])
self.group = Group.objects.create(name='neagive_id_group', id=-2010)
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' % self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
|
@unittest.skipIf(DJANGO_VERSION >= (3, 0) and 'mysql' in os.environ.get('DATABASE_URL', ''), 'Negative ids no longer work in Django 3.0+ with MySQL.')
def test_view_manage_negative_group_form(self):
self.client.login(username='admin', password='admin')
url = reverse('admin:%s_%s_permissions' % self.obj_info, args=[self.obj.pk])
self.group = Group.objects.create(name='neagive_id_group', id=-2010)
data = {'group': self.group.name, 'submit_manage_group': 'submit'}
response = self.client.post(url, data, follow=True)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.redirect_chain[0][1], 302)
redirect_url = reverse('admin:%s_%s_permissions_manage_group' % self.obj_info, args=[self.obj.pk, self.group.id])
self.assertEqual(response.request['PATH_INFO'], redirect_url)
|
django-guardian
|
positive
|
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
<DeepExtract>
dataset_size = len(self.group_ids)
sampled_ids = torch.as_tensor(list(self.sampler))
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
mask = order >= 0
clusters = [(self.group_ids == i) & mask for i in self.groups]
relative_order = [order[cluster] for cluster in clusters]
permutation_ids = [s[s.sort()[1]] for s in relative_order]
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
first_element_of_batch = [t[0].item() for t in merged]
inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())}
first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch])
permutation_order = first_index_of_batch.sort(0)[1].tolist()
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
batches = batches
</DeepExtract>
self._batches = batches
return iter(batches)
|
def __iter__(self):
if self._can_reuse_batches:
batches = self._batches
self._can_reuse_batches = False
else:
dataset_size = len(self.group_ids)
sampled_ids = torch.as_tensor(list(self.sampler))
order = torch.full((dataset_size,), -1, dtype=torch.int64)
order[sampled_ids] = torch.arange(len(sampled_ids))
mask = order >= 0
clusters = [(self.group_ids == i) & mask for i in self.groups]
relative_order = [order[cluster] for cluster in clusters]
permutation_ids = [s[s.sort()[1]] for s in relative_order]
permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
splits = [c.split(self.batch_size) for c in permuted_clusters]
merged = tuple(itertools.chain.from_iterable(splits))
first_element_of_batch = [t[0].item() for t in merged]
inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())}
first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch])
permutation_order = first_index_of_batch.sort(0)[1].tolist()
batches = [merged[i].tolist() for i in permutation_order]
if self.drop_uneven:
kept = []
for batch in batches:
if len(batch) == self.batch_size:
kept.append(batch)
batches = kept
batches = batches
self._batches = batches
return iter(batches)
|
bezier_curve_text_spotting
|
positive
|
def get_paho_data(offset=0, dir='downloads'):
opts = Options()
opts.set_headless()
assert opts.headless
fp = FirefoxProfile()
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('browser.download.dir', os.path.abspath(dir))
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
browser = Firefox(options=opts, firefox_profile=fp)
browser.get('http://www.paho.org/data/index.php/en/mnu-topics/indicadores-dengue-en/dengue-nacional-en/252-dengue-pais-ano-en.html?showall=&start=1')
tab1 = browser.window_handles[0]
browser.execute_script('window.open("","_blank");')
tab2 = browser.window_handles[1]
browser.switch_to.window(tab1)
curr_offset = offset
<DeepExtract>
try:
WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.rt-top-inner')))
WebDriverWait(browser, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.rt-top-inner')))
print('Success Loading %s' % 'div.rt-top-inner')
except TimeoutException:
print('Loading %s took too much time!' % 'div.rt-top-inner')
</DeepExtract>
header = browser.find_element_by_css_selector('div.rt-top-inner')
global headerheight
headerheight = header.rect['height']
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption = browser.find_elements_by_css_selector('div.tabToolbarButton.tab-widget.download')[0]
<DeepExtract>
downloadoption.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption.click()
</DeepExtract>
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[data-tb-test-id='DownloadImage-Button']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[data-tb-test-id='DownloadImage-Button']")))
print('Success Loading %s' % "div[data-tb-test-id='DownloadImage-Button']")
except TimeoutException:
print('Loading %s took too much time!' % "div[data-tb-test-id='DownloadImage-Button']")
</DeepExtract>
imagebutton = browser.find_elements_by_css_selector("div[data-tb-test-id='DownloadImage-Button']")[0]
<DeepExtract>
imagebutton.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
imagebutton.click()
</DeepExtract>
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, ".tabDownloadFileButton[data-test-id='DownloadLink']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".tabDownloadFileButton[data-test-id='DownloadLink']")))
print('Success Loading %s' % ".tabDownloadFileButton[data-test-id='DownloadLink']")
except TimeoutException:
print('Loading %s took too much time!' % ".tabDownloadFileButton[data-test-id='DownloadLink']")
</DeepExtract>
downloadbutton = browser.find_elements_by_css_selector(".tabDownloadFileButton[data-test-id='DownloadLink']")[0]
href = downloadbutton.get_attribute('href')
startidx = href.index('sessions/') + len('sessions/')
endidx = href.index('/', startidx)
sessionid = href[startidx:endidx]
dataurl = 'http://phip.paho.org/vizql/w/Casosdedengue_tben/v/ByLastAvailableEpiWeek/viewData/sessions/%s/views/18076444178507886853_9530488980060483892?maxrows=200&viz=%%7B%%22worksheet%%22:%%22W%%20By%%20Last%%20Available%%20EpiWeek%%22,%%22dashboard%%22:%%22By%%20Last%%20Available%%20Epi%%20Week%%22%%7D' % sessionid
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[data-tb-test-id='CancelBtn-Button']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[data-tb-test-id='CancelBtn-Button']")))
print('Success Loading %s' % "div[data-tb-test-id='CancelBtn-Button']")
except TimeoutException:
print('Loading %s took too much time!' % "div[data-tb-test-id='CancelBtn-Button']")
</DeepExtract>
cancelbutton = browser.find_elements_by_css_selector("div[data-tb-test-id='CancelBtn-Button']")[0]
<DeepExtract>
cancelbutton.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
cancelbutton.click()
</DeepExtract>
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[id='tableau_base_widget_FilterPanel_0']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[id='tableau_base_widget_FilterPanel_0']")))
print('Success Loading %s' % "div[id='tableau_base_widget_FilterPanel_0']")
except TimeoutException:
print('Loading %s took too much time!' % "div[id='tableau_base_widget_FilterPanel_0']")
</DeepExtract>
yearselector = browser.find_elements_by_css_selector("div[id='tableau_base_widget_FilterPanel_0']")[0]
<DeepExtract>
yearselector.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
yearselector.click()
</DeepExtract>
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.facetOverflow')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.facetOverflow')))
print('Success Loading %s' % 'div.facetOverflow')
except TimeoutException:
print('Loading %s took too much time!' % 'div.facetOverflow')
</DeepExtract>
y = None
for i in browser.find_elements_by_css_selector('div.facetOverflow'):
if i.text == '(All)':
y = i
<DeepExtract>
y.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
y.click()
</DeepExtract>
for i in range(offset):
gp = browser.find_element_by_css_selector('div.wcGlassPane')
try:
WebDriverWait(browser, 10).until(EC.staleness_of(gp))
print('Loaded next week % d' % (53 - offset))
except TimeoutException:
print('Loading next week %d took too much time!' % (53 - offset))
gp = browser.find_element_by_css_selector('div.wcGlassPane')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
<DeepExtract>
x.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
x.click()
</DeepExtract>
for i in range(54 - offset):
try:
print('Loading week %d' % (53 - i))
browser.switch_to.window(tab2)
browser.get(dataurl)
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "li[id='tab-view-full-data']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "li[id='tab-view-full-data']")))
print('Success Loading %s' % "li[id='tab-view-full-data']")
except TimeoutException:
print('Loading %s took too much time!' % "li[id='tab-view-full-data']")
</DeepExtract>
full_data_tab = browser.find_elements_by_css_selector("li[id='tab-view-full-data']")[0]
full_data_tab.click()
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'a.csvLink')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.csvLink')))
print('Success Loading %s' % 'a.csvLink')
except TimeoutException:
print('Loading %s took too much time!' % 'a.csvLink')
</DeepExtract>
data_links = browser.find_elements_by_css_selector('a.csvLink')
data_link = None
for i in data_links:
if i.get_property('href') != '':
data_link = i
break
data_link.click()
browser.switch_to.window(tab1)
<DeepExtract>
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')))
print('Success Loading %s' % 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
except TimeoutException:
print('Loading %s took too much time!' % 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
</DeepExtract>
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
<DeepExtract>
x.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
x.click()
</DeepExtract>
curr_offset += 1
except Exception as e:
print('Got exception %s\nTrying again from week %d' % (e, 53 - offset))
browser.quit()
<DeepExtract>
opts = Options()
opts.set_headless()
assert opts.headless
fp = FirefoxProfile()
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('browser.download.dir', os.path.abspath(dir))
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
browser = Firefox(options=opts, firefox_profile=fp)
browser.get('http://www.paho.org/data/index.php/en/mnu-topics/indicadores-dengue-en/dengue-nacional-en/252-dengue-pais-ano-en.html?showall=&start=1')
tab1 = browser.window_handles[0]
browser.execute_script('window.open("","_blank");')
tab2 = browser.window_handles[1]
browser.switch_to.window(tab1)
curr_offset = curr_offset
wait_for(browser, 'div.rt-top-inner', delay=30)
header = browser.find_element_by_css_selector('div.rt-top-inner')
global headerheight
headerheight = header.rect['height']
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption = browser.find_elements_by_css_selector('div.tabToolbarButton.tab-widget.download')[0]
find_and_click(browser, downloadoption)
wait_for(browser, "div[data-tb-test-id='DownloadImage-Button']")
imagebutton = browser.find_elements_by_css_selector("div[data-tb-test-id='DownloadImage-Button']")[0]
find_and_click(browser, imagebutton)
wait_for(browser, ".tabDownloadFileButton[data-test-id='DownloadLink']")
downloadbutton = browser.find_elements_by_css_selector(".tabDownloadFileButton[data-test-id='DownloadLink']")[0]
href = downloadbutton.get_attribute('href')
startidx = href.index('sessions/') + len('sessions/')
endidx = href.index('/', startidx)
sessionid = href[startidx:endidx]
dataurl = 'http://phip.paho.org/vizql/w/Casosdedengue_tben/v/ByLastAvailableEpiWeek/viewData/sessions/%s/views/18076444178507886853_9530488980060483892?maxrows=200&viz=%%7B%%22worksheet%%22:%%22W%%20By%%20Last%%20Available%%20EpiWeek%%22,%%22dashboard%%22:%%22By%%20Last%%20Available%%20Epi%%20Week%%22%%7D' % sessionid
wait_for(browser, "div[data-tb-test-id='CancelBtn-Button']")
cancelbutton = browser.find_elements_by_css_selector("div[data-tb-test-id='CancelBtn-Button']")[0]
find_and_click(browser, cancelbutton)
wait_for(browser, "div[id='tableau_base_widget_FilterPanel_0']")
yearselector = browser.find_elements_by_css_selector("div[id='tableau_base_widget_FilterPanel_0']")[0]
find_and_click(browser, yearselector)
wait_for(browser, 'div.facetOverflow')
y = None
for i in browser.find_elements_by_css_selector('div.facetOverflow'):
if i.text == '(All)':
y = i
find_and_click(browser, y)
for i in range(curr_offset):
gp = browser.find_element_by_css_selector('div.wcGlassPane')
try:
WebDriverWait(browser, 10).until(EC.staleness_of(gp))
print('Loaded next week % d' % (53 - curr_offset))
except TimeoutException:
print('Loading next week %d took too much time!' % (53 - curr_offset))
gp = browser.find_element_by_css_selector('div.wcGlassPane')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
find_and_click(browser, x)
for i in range(54 - curr_offset):
try:
print('Loading week %d' % (53 - i))
browser.switch_to.window(tab2)
browser.get(dataurl)
wait_for(browser, "li[id='tab-view-full-data']")
full_data_tab = browser.find_elements_by_css_selector("li[id='tab-view-full-data']")[0]
full_data_tab.click()
wait_for(browser, 'a.csvLink')
data_links = browser.find_elements_by_css_selector('a.csvLink')
data_link = None
for i in data_links:
if i.get_property('href') != '':
data_link = i
break
data_link.click()
browser.switch_to.window(tab1)
wait_for(browser, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
find_and_click(browser, x)
curr_offset += 1
except Exception as e:
print('Got exception %s\nTrying again from week %d' % (e, 53 - curr_offset))
browser.quit()
get_paho_data(offset=curr_offset)
browser.quit()
</DeepExtract>
browser.quit()
|
def get_paho_data(offset=0, dir='downloads'):
opts = Options()
opts.set_headless()
assert opts.headless
fp = FirefoxProfile()
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('browser.download.dir', os.path.abspath(dir))
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
browser = Firefox(options=opts, firefox_profile=fp)
browser.get('http://www.paho.org/data/index.php/en/mnu-topics/indicadores-dengue-en/dengue-nacional-en/252-dengue-pais-ano-en.html?showall=&start=1')
tab1 = browser.window_handles[0]
browser.execute_script('window.open("","_blank");')
tab2 = browser.window_handles[1]
browser.switch_to.window(tab1)
curr_offset = offset
try:
WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.rt-top-inner')))
WebDriverWait(browser, 30).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.rt-top-inner')))
print('Success Loading %s' % 'div.rt-top-inner')
except TimeoutException:
print('Loading %s took too much time!' % 'div.rt-top-inner')
header = browser.find_element_by_css_selector('div.rt-top-inner')
global headerheight
headerheight = header.rect['height']
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption = browser.find_elements_by_css_selector('div.tabToolbarButton.tab-widget.download')[0]
downloadoption.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption.click()
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[data-tb-test-id='DownloadImage-Button']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[data-tb-test-id='DownloadImage-Button']")))
print('Success Loading %s' % "div[data-tb-test-id='DownloadImage-Button']")
except TimeoutException:
print('Loading %s took too much time!' % "div[data-tb-test-id='DownloadImage-Button']")
imagebutton = browser.find_elements_by_css_selector("div[data-tb-test-id='DownloadImage-Button']")[0]
imagebutton.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
imagebutton.click()
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, ".tabDownloadFileButton[data-test-id='DownloadLink']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".tabDownloadFileButton[data-test-id='DownloadLink']")))
print('Success Loading %s' % ".tabDownloadFileButton[data-test-id='DownloadLink']")
except TimeoutException:
print('Loading %s took too much time!' % ".tabDownloadFileButton[data-test-id='DownloadLink']")
downloadbutton = browser.find_elements_by_css_selector(".tabDownloadFileButton[data-test-id='DownloadLink']")[0]
href = downloadbutton.get_attribute('href')
startidx = href.index('sessions/') + len('sessions/')
endidx = href.index('/', startidx)
sessionid = href[startidx:endidx]
dataurl = 'http://phip.paho.org/vizql/w/Casosdedengue_tben/v/ByLastAvailableEpiWeek/viewData/sessions/%s/views/18076444178507886853_9530488980060483892?maxrows=200&viz=%%7B%%22worksheet%%22:%%22W%%20By%%20Last%%20Available%%20EpiWeek%%22,%%22dashboard%%22:%%22By%%20Last%%20Available%%20Epi%%20Week%%22%%7D' % sessionid
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[data-tb-test-id='CancelBtn-Button']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[data-tb-test-id='CancelBtn-Button']")))
print('Success Loading %s' % "div[data-tb-test-id='CancelBtn-Button']")
except TimeoutException:
print('Loading %s took too much time!' % "div[data-tb-test-id='CancelBtn-Button']")
cancelbutton = browser.find_elements_by_css_selector("div[data-tb-test-id='CancelBtn-Button']")[0]
cancelbutton.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
cancelbutton.click()
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "div[id='tableau_base_widget_FilterPanel_0']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[id='tableau_base_widget_FilterPanel_0']")))
print('Success Loading %s' % "div[id='tableau_base_widget_FilterPanel_0']")
except TimeoutException:
print('Loading %s took too much time!' % "div[id='tableau_base_widget_FilterPanel_0']")
yearselector = browser.find_elements_by_css_selector("div[id='tableau_base_widget_FilterPanel_0']")[0]
yearselector.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
yearselector.click()
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.facetOverflow')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.facetOverflow')))
print('Success Loading %s' % 'div.facetOverflow')
except TimeoutException:
print('Loading %s took too much time!' % 'div.facetOverflow')
y = None
for i in browser.find_elements_by_css_selector('div.facetOverflow'):
if i.text == '(All)':
y = i
y.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
y.click()
for i in range(offset):
gp = browser.find_element_by_css_selector('div.wcGlassPane')
try:
WebDriverWait(browser, 10).until(EC.staleness_of(gp))
print('Loaded next week % d' % (53 - offset))
except TimeoutException:
print('Loading next week %d took too much time!' % (53 - offset))
gp = browser.find_element_by_css_selector('div.wcGlassPane')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
x.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
x.click()
for i in range(54 - offset):
try:
print('Loading week %d' % (53 - i))
browser.switch_to.window(tab2)
browser.get(dataurl)
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, "li[id='tab-view-full-data']")))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "li[id='tab-view-full-data']")))
print('Success Loading %s' % "li[id='tab-view-full-data']")
except TimeoutException:
print('Loading %s took too much time!' % "li[id='tab-view-full-data']")
full_data_tab = browser.find_elements_by_css_selector("li[id='tab-view-full-data']")[0]
full_data_tab.click()
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'a.csvLink')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'a.csvLink')))
print('Success Loading %s' % 'a.csvLink')
except TimeoutException:
print('Loading %s took too much time!' % 'a.csvLink')
data_links = browser.find_elements_by_css_selector('a.csvLink')
data_link = None
for i in data_links:
if i.get_property('href') != '':
data_link = i
break
data_link.click()
browser.switch_to.window(tab1)
try:
WebDriverWait(browser, delay).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')))
WebDriverWait(browser, delay).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')))
print('Success Loading %s' % 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
except TimeoutException:
print('Loading %s took too much time!' % 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
x.location_once_scrolled_into_view
browser.switch_to.default_content()
browser.execute_script('window.scrollBy(0,-%d)' % headerheight)
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
x.click()
curr_offset += 1
except Exception as e:
print('Got exception %s\nTrying again from week %d' % (e, 53 - offset))
browser.quit()
opts = Options()
opts.set_headless()
assert opts.headless
fp = FirefoxProfile()
fp.set_preference('browser.download.folderList', 2)
fp.set_preference('browser.download.manager.showWhenStarting', False)
fp.set_preference('browser.download.dir', os.path.abspath(dir))
fp.set_preference('browser.helperApps.neverAsk.saveToDisk', 'text/csv')
browser = Firefox(options=opts, firefox_profile=fp)
browser.get('http://www.paho.org/data/index.php/en/mnu-topics/indicadores-dengue-en/dengue-nacional-en/252-dengue-pais-ano-en.html?showall=&start=1')
tab1 = browser.window_handles[0]
browser.execute_script('window.open("","_blank");')
tab2 = browser.window_handles[1]
browser.switch_to.window(tab1)
curr_offset = curr_offset
wait_for(browser, 'div.rt-top-inner', delay=30)
header = browser.find_element_by_css_selector('div.rt-top-inner')
global headerheight
headerheight = header.rect['height']
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
browser.switch_to.frame(browser.find_element_by_tag_name('iframe'))
downloadoption = browser.find_elements_by_css_selector('div.tabToolbarButton.tab-widget.download')[0]
find_and_click(browser, downloadoption)
wait_for(browser, "div[data-tb-test-id='DownloadImage-Button']")
imagebutton = browser.find_elements_by_css_selector("div[data-tb-test-id='DownloadImage-Button']")[0]
find_and_click(browser, imagebutton)
wait_for(browser, ".tabDownloadFileButton[data-test-id='DownloadLink']")
downloadbutton = browser.find_elements_by_css_selector(".tabDownloadFileButton[data-test-id='DownloadLink']")[0]
href = downloadbutton.get_attribute('href')
startidx = href.index('sessions/') + len('sessions/')
endidx = href.index('/', startidx)
sessionid = href[startidx:endidx]
dataurl = 'http://phip.paho.org/vizql/w/Casosdedengue_tben/v/ByLastAvailableEpiWeek/viewData/sessions/%s/views/18076444178507886853_9530488980060483892?maxrows=200&viz=%%7B%%22worksheet%%22:%%22W%%20By%%20Last%%20Available%%20EpiWeek%%22,%%22dashboard%%22:%%22By%%20Last%%20Available%%20Epi%%20Week%%22%%7D' % sessionid
wait_for(browser, "div[data-tb-test-id='CancelBtn-Button']")
cancelbutton = browser.find_elements_by_css_selector("div[data-tb-test-id='CancelBtn-Button']")[0]
find_and_click(browser, cancelbutton)
wait_for(browser, "div[id='tableau_base_widget_FilterPanel_0']")
yearselector = browser.find_elements_by_css_selector("div[id='tableau_base_widget_FilterPanel_0']")[0]
find_and_click(browser, yearselector)
wait_for(browser, 'div.facetOverflow')
y = None
for i in browser.find_elements_by_css_selector('div.facetOverflow'):
if i.text == '(All)':
y = i
find_and_click(browser, y)
for i in range(curr_offset):
gp = browser.find_element_by_css_selector('div.wcGlassPane')
try:
WebDriverWait(browser, 10).until(EC.staleness_of(gp))
print('Loaded next week % d' % (53 - curr_offset))
except TimeoutException:
print('Loading next week %d took too much time!' % (53 - curr_offset))
gp = browser.find_element_by_css_selector('div.wcGlassPane')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
find_and_click(browser, x)
for i in range(54 - curr_offset):
try:
print('Loading week %d' % (53 - i))
browser.switch_to.window(tab2)
browser.get(dataurl)
wait_for(browser, "li[id='tab-view-full-data']")
full_data_tab = browser.find_elements_by_css_selector("li[id='tab-view-full-data']")[0]
full_data_tab.click()
wait_for(browser, 'a.csvLink')
data_links = browser.find_elements_by_css_selector('a.csvLink')
data_link = None
for i in data_links:
if i.get_property('href') != '':
data_link = i
break
data_link.click()
browser.switch_to.window(tab1)
wait_for(browser, 'div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')
x = browser.find_elements_by_css_selector('div.dijitReset.dijitSliderButtonContainer.dijitSliderButtonContainerH.tableauArrowDec')[0]
find_and_click(browser, x)
curr_offset += 1
except Exception as e:
print('Got exception %s\nTrying again from week %d' % (e, 53 - curr_offset))
browser.quit()
get_paho_data(offset=curr_offset)
browser.quit()
browser.quit()
|
delphi-epidata
|
positive
|
def save_prediction_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):
context_frames = model_hparams.context_frames
sequence_length = model_hparams.sequence_length
context_images = results['context']
images = results['images']
gen_images = results['gen_images']
mse = metrics.mean_squared_error_np(images, gen_images, keep_axis=(0, 1))
<DeepExtract>
(head, tail) = os.path.split(os.path.join(task_dir, 'metrics', 'mse'))
if head and (not os.path.exists(head)):
os.makedirs(head)
assert mse.ndim == 2
file_mode = 'wb' if sample_start_ind == 0 else 'ab'
with io.open('%s.csv' % os.path.join(task_dir, 'metrics', 'mse'), file_mode) as csvfile:
writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
if sample_start_ind == 0:
writer.writerow(map(str, ['sample_ind'] + list(range(mse.shape[1])) + ['mean']))
for (i, metrics_row) in enumerate(mse):
writer.writerow(map(unicode, map(str, [sample_start_ind + i] + list(metrics_row) + [np.mean(metrics_row)])))
</DeepExtract>
if only_metrics:
return
<DeepExtract>
(head, tail) = os.path.split(os.path.join(task_dir, 'inputs', 'context_image'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(context_images)
if centers is None:
centers = [None] * len(context_images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(context_images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'inputs', 'context_image'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
</DeepExtract>
<DeepExtract>
(head, tail) = os.path.split(os.path.join(task_dir, 'inputs', 'gt'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(images)
if centers is None:
centers = [None] * len(images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'inputs', 'gt'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
</DeepExtract>
<DeepExtract>
(head, tail) = os.path.split(os.path.join(task_dir, 'outputs', 'gen_image'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(gen_images)
if centers is None:
centers = [None] * len(gen_images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(gen_images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'outputs', 'gen_image'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
</DeepExtract>
|
def save_prediction_results(task_dir, results, model_hparams, sample_start_ind=0, only_metrics=False):
context_frames = model_hparams.context_frames
sequence_length = model_hparams.sequence_length
context_images = results['context']
images = results['images']
gen_images = results['gen_images']
mse = metrics.mean_squared_error_np(images, gen_images, keep_axis=(0, 1))
(head, tail) = os.path.split(os.path.join(task_dir, 'metrics', 'mse'))
if head and (not os.path.exists(head)):
os.makedirs(head)
assert mse.ndim == 2
file_mode = 'wb' if sample_start_ind == 0 else 'ab'
with io.open('%s.csv' % os.path.join(task_dir, 'metrics', 'mse'), file_mode) as csvfile:
writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
if sample_start_ind == 0:
writer.writerow(map(str, ['sample_ind'] + list(range(mse.shape[1])) + ['mean']))
for (i, metrics_row) in enumerate(mse):
writer.writerow(map(unicode, map(str, [sample_start_ind + i] + list(metrics_row) + [np.mean(metrics_row)])))
if only_metrics:
return
(head, tail) = os.path.split(os.path.join(task_dir, 'inputs', 'context_image'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(context_images)
if centers is None:
centers = [None] * len(context_images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(context_images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'inputs', 'context_image'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
(head, tail) = os.path.split(os.path.join(task_dir, 'inputs', 'gt'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(images)
if centers is None:
centers = [None] * len(images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'inputs', 'gt'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
(head, tail) = os.path.split(os.path.join(task_dir, 'outputs', 'gen_image'))
if head and (not os.path.exists(head)):
os.makedirs(head)
if overlaid_images is None:
overlaid_images = [None] * len(gen_images)
if centers is None:
centers = [None] * len(gen_images)
for (i, (images_, overlaid_images_, centers_)) in enumerate(zip(gen_images, overlaid_images, centers)):
images_fname = '%s_%05d' % (os.path.join(task_dir, 'outputs', 'gen_image'), sample_start_ind + i)
save_image_sequence(images_fname, images_, overlaid_images_, centers_, radius=radius, alpha=alpha, time_start_ind=time_start_ind)
|
DSGAN
|
positive
|
def get_fallback_mount_target_ip_address(config, options, fs_id, dns_name):
<DeepExtract>
fall_back_to_ip_address_enabled = get_boolean_config_item_value(config, CONFIG_SECTION, FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, default_value=DEFAULT_FALLBACK_ENABLED)
</DeepExtract>
if not fall_back_to_ip_address_enabled:
fallback_message = 'Fallback to mount target ip address feature is not enabled in config file %s.' % CONFIG_FILE
raise FallbackException(fallback_message)
if not BOTOCORE_PRESENT:
fallback_message = 'Failed to import necessary dependency botocore, please install botocore first.'
raise FallbackException(fallback_message)
mount_target_ip_address = None
try:
<DeepExtract>
az_name = get_target_az(config, options)
ec2_client = get_botocore_client(config, 'ec2', options)
efs_client = get_botocore_client(config, 'efs', options)
mount_target = get_mount_target_in_az(efs_client, ec2_client, fs_id, az_name)
mount_target_ip = mount_target.get('IpAddress')
logging.debug('Found mount target ip address %s in AZ %s', mount_target_ip, az_name)
mount_target_ip_address = mount_target_ip
</DeepExtract>
<DeepExtract>
tries = 3
for attempt in range(tries):
try:
if not options.get('netns') if 'netns' in options else None:
s = socket.create_connection((mount_target_ip_address, 2049), timeout=2)
else:
with NetNS(nspath=options.get('netns') if 'netns' in options else None):
s = socket.create_connection((mount_target_ip_address, 2049), timeout=2)
s.close()
return True
except socket.timeout:
if attempt < tries - 1:
message = 'The ip address %s cannot be connected yet, sleep 0.5s, %s retry time(s) left' % (mount_target_ip_address, tries - attempt - 1)
logging.warning(message)
time.sleep(0.5)
continue
else:
raise FallbackException('Connection to the mount target IP address %s timeout. Please retry in 5 minutes if the mount target is newly created. Otherwise check your VPC and security group configuration to ensure your file system is reachable via TCP port 2049 from your instance.' % mount_target_ip_address)
except Exception as e:
hint_message = ' Please check if the mount target ip address passed via mount option is correct.' if passed_via_options else ''
raise FallbackException('Unknown error when connecting to mount target IP address %s, %s.%s' % (mount_target_ip_address, e, hint_message))
</DeepExtract>
return mount_target_ip_address
except FallbackException as e:
<DeepExtract>
dns_message = 'Failed to resolve "%s". ' % dns_name if dns_name else ''
if not mount_target_ip_address:
ip_address_message = 'The file system mount target ip address cannot be found, please pass mount target ip address via mount options. '
else:
ip_address_message = 'Cannot connect to file system mount target ip address %s. ' % mount_target_ip_address
e.message = '\n%s' % e.message if e.message else ''
fatal_error('%s%s%s' % (dns_message, ip_address_message, e.message))
</DeepExtract>
|
def get_fallback_mount_target_ip_address(config, options, fs_id, dns_name):
fall_back_to_ip_address_enabled = get_boolean_config_item_value(config, CONFIG_SECTION, FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, default_value=DEFAULT_FALLBACK_ENABLED)
if not fall_back_to_ip_address_enabled:
fallback_message = 'Fallback to mount target ip address feature is not enabled in config file %s.' % CONFIG_FILE
raise FallbackException(fallback_message)
if not BOTOCORE_PRESENT:
fallback_message = 'Failed to import necessary dependency botocore, please install botocore first.'
raise FallbackException(fallback_message)
mount_target_ip_address = None
try:
az_name = get_target_az(config, options)
ec2_client = get_botocore_client(config, 'ec2', options)
efs_client = get_botocore_client(config, 'efs', options)
mount_target = get_mount_target_in_az(efs_client, ec2_client, fs_id, az_name)
mount_target_ip = mount_target.get('IpAddress')
logging.debug('Found mount target ip address %s in AZ %s', mount_target_ip, az_name)
mount_target_ip_address = mount_target_ip
tries = 3
for attempt in range(tries):
try:
if not options.get('netns') if 'netns' in options else None:
s = socket.create_connection((mount_target_ip_address, 2049), timeout=2)
else:
with NetNS(nspath=options.get('netns') if 'netns' in options else None):
s = socket.create_connection((mount_target_ip_address, 2049), timeout=2)
s.close()
return True
except socket.timeout:
if attempt < tries - 1:
message = 'The ip address %s cannot be connected yet, sleep 0.5s, %s retry time(s) left' % (mount_target_ip_address, tries - attempt - 1)
logging.warning(message)
time.sleep(0.5)
continue
else:
raise FallbackException('Connection to the mount target IP address %s timeout. Please retry in 5 minutes if the mount target is newly created. Otherwise check your VPC and security group configuration to ensure your file system is reachable via TCP port 2049 from your instance.' % mount_target_ip_address)
except Exception as e:
hint_message = ' Please check if the mount target ip address passed via mount option is correct.' if passed_via_options else ''
raise FallbackException('Unknown error when connecting to mount target IP address %s, %s.%s' % (mount_target_ip_address, e, hint_message))
return mount_target_ip_address
except FallbackException as e:
dns_message = 'Failed to resolve "%s". ' % dns_name if dns_name else ''
if not mount_target_ip_address:
ip_address_message = 'The file system mount target ip address cannot be found, please pass mount target ip address via mount options. '
else:
ip_address_message = 'Cannot connect to file system mount target ip address %s. ' % mount_target_ip_address
e.message = '\n%s' % e.message if e.message else ''
fatal_error('%s%s%s' % (dns_message, ip_address_message, e.message))
|
efs-utils
|
positive
|
def call(self, targets, inputs, outputs=None):
<DeepExtract>
if bm.as_jax(inputs).ndim < 2:
raise ValueError(f'Data must be a 2d tensor. But we got {bm.as_jax(inputs).ndim}d: {bm.as_jax(inputs).shape}.')
if bm.as_jax(inputs).ndim != 2:
inputs = bm.flatten(bm.as_jax(inputs), end_dim=-2)
else:
inputs = bm.as_jax(inputs)
</DeepExtract>
<DeepExtract>
if bm.as_jax(targets).ndim < 2:
raise ValueError(f'Data must be a 2d tensor. But we got {bm.as_jax(targets).ndim}d: {bm.as_jax(targets).shape}.')
if bm.as_jax(targets).ndim != 2:
targets = bm.flatten(bm.as_jax(targets), end_dim=-2)
else:
targets = bm.as_jax(targets)
</DeepExtract>
inputs = normalize(polynomial_features(inputs, degree=self.degree, add_bias=self.add_bias))
return super(LassoRegression, self).gradient_descent_solve(targets, inputs)
|
def call(self, targets, inputs, outputs=None):
if bm.as_jax(inputs).ndim < 2:
raise ValueError(f'Data must be a 2d tensor. But we got {bm.as_jax(inputs).ndim}d: {bm.as_jax(inputs).shape}.')
if bm.as_jax(inputs).ndim != 2:
inputs = bm.flatten(bm.as_jax(inputs), end_dim=-2)
else:
inputs = bm.as_jax(inputs)
if bm.as_jax(targets).ndim < 2:
raise ValueError(f'Data must be a 2d tensor. But we got {bm.as_jax(targets).ndim}d: {bm.as_jax(targets).shape}.')
if bm.as_jax(targets).ndim != 2:
targets = bm.flatten(bm.as_jax(targets), end_dim=-2)
else:
targets = bm.as_jax(targets)
inputs = normalize(polynomial_features(inputs, degree=self.degree, add_bias=self.add_bias))
return super(LassoRegression, self).gradient_descent_solve(targets, inputs)
|
BrainPy
|
positive
|
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
self._max_search -= len(s) + 1 - len(pat)
if self._max_search < 0:
<DeepExtract>
if not self.closed:
self.connection.close()
self.closed = True
</DeepExtract>
return False
<DeepExtract>
self.buffer = s[1 - len(pat):] + self.buffer
</DeepExtract>
return False
<DeepExtract>
self.buffer = s[p + len(pat):] + self.buffer
</DeepExtract>
return True
|
def _search_for_pattern(self, s, pat):
p = s.find(pat)
if p < 0:
self._max_search -= len(s) + 1 - len(pat)
if self._max_search < 0:
if not self.closed:
self.connection.close()
self.closed = True
return False
self.buffer = s[1 - len(pat):] + self.buffer
return False
self.buffer = s[p + len(pat):] + self.buffer
return True
|
BitTornado
|
positive
|
def sendto(self, data, addr, endpoint):
print('sendto ' + str(endpoint))
<DeepExtract>
packet = OnionPacket()
packet.createOnionPacket(self.keypair, endpoint, data, self.keys.entropy)
packet = packet
</DeepExtract>
print('Sending')
print(packet)
self.sock.sendto(packet.packet, 0, addr)
|
def sendto(self, data, addr, endpoint):
print('sendto ' + str(endpoint))
packet = OnionPacket()
packet.createOnionPacket(self.keypair, endpoint, data, self.keys.entropy)
packet = packet
print('Sending')
print(packet)
self.sock.sendto(packet.packet, 0, addr)
|
Dust
|
positive
|
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.enabled = True
cudnn.benchmark = False
cudnn.deterministic = True
args.steps = int(np.ceil(45000 / args.child_batch_size)) * args.child_epochs
logging.info('args = %s', args)
if args.child_arch_pool is not None:
logging.info('Architecture pool is provided, loading')
with open(args.child_arch_pool) as f:
archs = f.read().splitlines()
archs = list(map(utils.build_dag, archs))
child_arch_pool = archs
elif os.path.exists(os.path.join(args.output_dir, 'arch_pool')):
logging.info('Architecture pool is founded, loading')
with open(os.path.join(args.output_dir, 'arch_pool')) as f:
archs = f.read().splitlines()
archs = list(map(utils.build_dag, archs))
child_arch_pool = archs
else:
child_arch_pool = None
child_eval_epochs = eval(args.child_eval_epochs)
<DeepExtract>
if args.dataset == 'cifar10':
build_fn = build_cifar10
elif args.dataset == 'cifar100':
build_fn = build_cifar100
else:
build_fn = build_imagenet
</DeepExtract>
(train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler) = build_fn(ratio=0.9, epoch=-1)
nao = NAO(args.controller_encoder_layers, args.controller_encoder_vocab_size, args.controller_encoder_hidden_size, args.controller_encoder_dropout, args.controller_encoder_length, args.controller_source_length, args.controller_encoder_emb_size, args.controller_mlp_layers, args.controller_mlp_hidden_size, args.controller_mlp_dropout, args.controller_decoder_layers, args.controller_decoder_vocab_size, args.controller_decoder_hidden_size, args.controller_decoder_dropout, args.controller_decoder_length)
nao = nao.cuda()
logging.info('Encoder-Predictor-Decoder param size = %fMB', utils.count_parameters_in_MB(nao))
if child_arch_pool is None:
logging.info('Architecture pool is not provided, randomly generating now')
child_arch_pool = utils.generate_arch(args.controller_seed_arch, args.child_nodes, 5)
if args.child_sample_policy == 'params':
child_arch_pool_prob = []
for arch in child_arch_pool:
if args.dataset == 'cifar10':
tmp_model = NASNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
elif args.dataset == 'cifar100':
tmp_model = NASNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
else:
tmp_model = NASNetworkImageNet(args, 1000, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
child_arch_pool_prob.append(utils.count_parameters_in_MB(tmp_model))
del tmp_model
else:
child_arch_pool_prob = None
eval_points = utils.generate_eval_points(child_eval_epochs, 0, args.child_epochs)
step = 0
for epoch in range(1, args.child_epochs + 1):
scheduler.step()
lr = scheduler.get_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
<DeepExtract>
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.train()
for (step, (input, target)) in enumerate(train_queue):
input = input.cuda().requires_grad_()
target = target.cuda()
optimizer.zero_grad()
arch = utils.sample_arch(child_arch_pool, child_arch_pool_prob)
(logits, aux_logits) = model(input, arch, step)
step += 1
loss = train_criterion(logits, target)
if aux_logits is not None:
aux_loss = train_criterion(aux_logits, target)
loss += 0.4 * aux_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.child_grad_bound)
optimizer.step()
(prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if (step + 1) % 100 == 0:
logging.info('Train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
logging.info('Arch: %s', ' '.join(map(str, arch[0] + arch[1])))
(train_acc, train_obj, step) = (top1.avg, objs.avg, step)
</DeepExtract>
logging.info('train_acc %f', train_acc)
if epoch not in eval_points:
continue
<DeepExtract>
valid_acc_list = []
with torch.no_grad():
model.eval()
for (i, arch) in enumerate(child_arch_pool):
(inputs, targets) = next(iter(valid_queue))
inputs = inputs.cuda()
targets = targets.cuda()
(logits, _) = model(inputs, arch, bn_train=True)
loss = eval_criterion(logits, targets)
(prec1, prec5) = utils.accuracy(logits, targets, topk=(1, 5))
valid_acc_list.append(prec1.data / 100)
if (i + 1) % 100 == 0:
logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f', ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
valid_accuracy_list = valid_acc_list
</DeepExtract>
old_archs = child_arch_pool
old_archs_perf = valid_accuracy_list
old_archs_sorted_indices = np.argsort(old_archs_perf)[::-1]
old_archs = [old_archs[i] for i in old_archs_sorted_indices]
old_archs_perf = [old_archs_perf[i] for i in old_archs_sorted_indices]
with open(os.path.join(args.output_dir, 'arch_pool.{}'.format(epoch)), 'w') as fa:
with open(os.path.join(args.output_dir, 'arch_pool.perf.{}'.format(epoch)), 'w') as fp:
with open(os.path.join(args.output_dir, 'arch_pool'), 'w') as fa_latest:
with open(os.path.join(args.output_dir, 'arch_pool.perf'), 'w') as fp_latest:
for (arch, perf) in zip(old_archs, old_archs_perf):
arch = ' '.join(map(str, arch[0] + arch[1]))
fa.write('{}\n'.format(arch))
fa_latest.write('{}\n'.format(arch))
fp.write('{}\n'.format(perf))
fp_latest.write('{}\n'.format(perf))
if epoch == args.child_epochs:
break
logging.info('Training Encoder-Predictor-Decoder')
encoder_input = list(map(lambda x: utils.parse_arch_to_seq(x[0], 2) + utils.parse_arch_to_seq(x[1], 2), old_archs))
min_val = min(old_archs_perf)
max_val = max(old_archs_perf)
encoder_target = [(i - min_val) / (max_val - min_val) for i in old_archs_perf]
if args.controller_expand is not None:
dataset = list(zip(encoder_input, encoder_target))
n = len(dataset)
ratio = 0.9
split = int(n * ratio)
np.random.shuffle(dataset)
(encoder_input, encoder_target) = list(zip(*dataset))
train_encoder_input = list(encoder_input[:split])
train_encoder_target = list(encoder_target[:split])
valid_encoder_input = list(encoder_input[split:])
valid_encoder_target = list(encoder_target[split:])
for _ in range(args.controller_expand - 1):
for (src, tgt) in zip(encoder_input[:split], encoder_target[:split]):
a = np.random.randint(0, args.child_nodes)
b = np.random.randint(0, args.child_nodes)
src = src[:4 * a] + src[4 * a + 2:4 * a + 4] + src[4 * a:4 * a + 2] + src[4 * (a + 1):20 + 4 * b] + src[20 + 4 * b + 2:20 + 4 * b + 4] + src[20 + 4 * b:20 + 4 * b + 2] + src[20 + 4 * (b + 1):]
train_encoder_input.append(src)
train_encoder_target.append(tgt)
else:
train_encoder_input = encoder_input
train_encoder_target = encoder_target
valid_encoder_input = encoder_input
valid_encoder_target = encoder_target
logging.info('Train data: {}\tValid data: {}'.format(len(train_encoder_input), len(valid_encoder_input)))
nao_train_dataset = utils.NAODataset(train_encoder_input, train_encoder_target, True, swap=True if args.controller_expand is None else False)
nao_valid_dataset = utils.NAODataset(valid_encoder_input, valid_encoder_target, False)
nao_train_queue = torch.utils.data.DataLoader(nao_train_dataset, batch_size=args.controller_batch_size, shuffle=True, pin_memory=True)
nao_valid_queue = torch.utils.data.DataLoader(nao_valid_dataset, batch_size=args.controller_batch_size, shuffle=False, pin_memory=True)
nao_optimizer = torch.optim.Adam(nao.parameters(), lr=args.controller_lr, weight_decay=args.controller_l2_reg)
for nao_epoch in range(1, args.controller_epochs + 1):
<DeepExtract>
objs = utils.AvgrageMeter()
mse = utils.AvgrageMeter()
nll = utils.AvgrageMeter()
nao.train()
for (step, sample) in enumerate(nao_train_queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_input = sample['decoder_input']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda().requires_grad_()
decoder_input = decoder_input.cuda()
decoder_target = decoder_target.cuda()
nao_optimizer.zero_grad()
(predict_value, log_prob, arch) = nao(encoder_input, decoder_input)
loss_1 = F.mse_loss(predict_value.squeeze(), encoder_target.squeeze())
loss_2 = F.nll_loss(log_prob.contiguous().view(-1, log_prob.size(-1)), decoder_target.view(-1))
loss = args.controller_trade_off * loss_1 + (1 - args.controller_trade_off) * loss_2
loss.backward()
torch.nn.utils.clip_grad_norm_(nao.parameters(), args.controller_grad_bound)
nao_optimizer.step()
n = encoder_input.size(0)
objs.update(loss.data, n)
mse.update(loss_1.data, n)
nll.update(loss_2.data, n)
(nao_loss, nao_mse, nao_ce) = (objs.avg, mse.avg, nll.avg)
</DeepExtract>
logging.info('epoch %04d train loss %.6f mse %.6f ce %.6f', nao_epoch, nao_loss, nao_mse, nao_ce)
if nao_epoch % 100 == 0:
<DeepExtract>
pa = utils.AvgrageMeter()
hs = utils.AvgrageMeter()
with torch.no_grad():
nao.eval()
for (step, sample) in enumerate(nao_valid_queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda()
decoder_target = decoder_target.cuda()
(predict_value, logits, arch) = nao(encoder_input)
n = encoder_input.size(0)
pairwise_acc = utils.pairwise_accuracy(encoder_target.data.squeeze().tolist(), predict_value.data.squeeze().tolist())
hamming_dis = utils.hamming_distance(decoder_target.data.squeeze().tolist(), arch.data.squeeze().tolist())
pa.update(pairwise_acc, n)
hs.update(hamming_dis, n)
(pa, hs) = (pa.avg, hs.avg)
</DeepExtract>
logging.info('Evaluation on valid data')
logging.info('epoch %04d pairwise accuracy %.6f hamming distance %.6f', epoch, pa, hs)
new_archs = []
max_step_size = 50
predict_step_size = 0
top100_archs = list(map(lambda x: utils.parse_arch_to_seq(x[0], 2) + utils.parse_arch_to_seq(x[1], 2), old_archs[:100]))
nao_infer_dataset = utils.NAODataset(top100_archs, None, False)
nao_infer_queue = torch.utils.data.DataLoader(nao_infer_dataset, batch_size=len(nao_infer_dataset), shuffle=False, pin_memory=True)
while len(new_archs) < args.controller_new_arch:
predict_step_size += 1
logging.info('Generate new architectures with step size %d', predict_step_size)
<DeepExtract>
new_arch_list = []
nao.eval()
for (i, sample) in enumerate(nao_infer_queue):
encoder_input = sample['encoder_input']
encoder_input = encoder_input.cuda()
nao.zero_grad()
new_arch = nao.generate_new_arch(encoder_input, predict_step_size, direction='+')
new_arch_list.extend(new_arch.data.squeeze().tolist())
new_arch = new_arch_list
</DeepExtract>
for arch in new_arch:
if arch not in encoder_input and arch not in new_archs:
new_archs.append(arch)
if len(new_archs) >= args.controller_new_arch:
break
logging.info('%d new archs generated now', len(new_archs))
if predict_step_size > max_step_size:
break
new_archs = list(map(lambda x: utils.parse_seq_to_arch(x, 2), new_archs))
num_new_archs = len(new_archs)
logging.info('Generate %d new archs', num_new_archs)
if args.controller_replace:
new_arch_pool = old_archs[:len(old_archs) - (num_new_archs + args.controller_random_arch)] + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
elif args.controller_discard:
new_arch_pool = old_archs[:100] + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
else:
new_arch_pool = old_archs + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
logging.info('Totally %d architectures now to train', len(new_arch_pool))
child_arch_pool = new_arch_pool
with open(os.path.join(args.output_dir, 'arch_pool'), 'w') as f:
for arch in new_arch_pool:
arch = ' '.join(map(str, arch[0] + arch[1]))
f.write('{}\n'.format(arch))
if args.child_sample_policy == 'params':
child_arch_pool_prob = []
for arch in child_arch_pool:
if args.dataset == 'cifar10':
tmp_model = NASNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
elif args.dataset == 'cifar100':
tmp_model = NASNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
else:
tmp_model = NASNetworkImageNet(args, 1000, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
child_arch_pool_prob.append(utils.count_parameters_in_MB(tmp_model))
del tmp_model
else:
child_arch_pool_prob = None
|
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
cudnn.enabled = True
cudnn.benchmark = False
cudnn.deterministic = True
args.steps = int(np.ceil(45000 / args.child_batch_size)) * args.child_epochs
logging.info('args = %s', args)
if args.child_arch_pool is not None:
logging.info('Architecture pool is provided, loading')
with open(args.child_arch_pool) as f:
archs = f.read().splitlines()
archs = list(map(utils.build_dag, archs))
child_arch_pool = archs
elif os.path.exists(os.path.join(args.output_dir, 'arch_pool')):
logging.info('Architecture pool is founded, loading')
with open(os.path.join(args.output_dir, 'arch_pool')) as f:
archs = f.read().splitlines()
archs = list(map(utils.build_dag, archs))
child_arch_pool = archs
else:
child_arch_pool = None
child_eval_epochs = eval(args.child_eval_epochs)
if args.dataset == 'cifar10':
build_fn = build_cifar10
elif args.dataset == 'cifar100':
build_fn = build_cifar100
else:
build_fn = build_imagenet
(train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler) = build_fn(ratio=0.9, epoch=-1)
nao = NAO(args.controller_encoder_layers, args.controller_encoder_vocab_size, args.controller_encoder_hidden_size, args.controller_encoder_dropout, args.controller_encoder_length, args.controller_source_length, args.controller_encoder_emb_size, args.controller_mlp_layers, args.controller_mlp_hidden_size, args.controller_mlp_dropout, args.controller_decoder_layers, args.controller_decoder_vocab_size, args.controller_decoder_hidden_size, args.controller_decoder_dropout, args.controller_decoder_length)
nao = nao.cuda()
logging.info('Encoder-Predictor-Decoder param size = %fMB', utils.count_parameters_in_MB(nao))
if child_arch_pool is None:
logging.info('Architecture pool is not provided, randomly generating now')
child_arch_pool = utils.generate_arch(args.controller_seed_arch, args.child_nodes, 5)
if args.child_sample_policy == 'params':
child_arch_pool_prob = []
for arch in child_arch_pool:
if args.dataset == 'cifar10':
tmp_model = NASNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
elif args.dataset == 'cifar100':
tmp_model = NASNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
else:
tmp_model = NASNetworkImageNet(args, 1000, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
child_arch_pool_prob.append(utils.count_parameters_in_MB(tmp_model))
del tmp_model
else:
child_arch_pool_prob = None
eval_points = utils.generate_eval_points(child_eval_epochs, 0, args.child_epochs)
step = 0
for epoch in range(1, args.child_epochs + 1):
scheduler.step()
lr = scheduler.get_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.train()
for (step, (input, target)) in enumerate(train_queue):
input = input.cuda().requires_grad_()
target = target.cuda()
optimizer.zero_grad()
arch = utils.sample_arch(child_arch_pool, child_arch_pool_prob)
(logits, aux_logits) = model(input, arch, step)
step += 1
loss = train_criterion(logits, target)
if aux_logits is not None:
aux_loss = train_criterion(aux_logits, target)
loss += 0.4 * aux_loss
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.child_grad_bound)
optimizer.step()
(prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if (step + 1) % 100 == 0:
logging.info('Train %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
logging.info('Arch: %s', ' '.join(map(str, arch[0] + arch[1])))
(train_acc, train_obj, step) = (top1.avg, objs.avg, step)
logging.info('train_acc %f', train_acc)
if epoch not in eval_points:
continue
valid_acc_list = []
with torch.no_grad():
model.eval()
for (i, arch) in enumerate(child_arch_pool):
(inputs, targets) = next(iter(valid_queue))
inputs = inputs.cuda()
targets = targets.cuda()
(logits, _) = model(inputs, arch, bn_train=True)
loss = eval_criterion(logits, targets)
(prec1, prec5) = utils.accuracy(logits, targets, topk=(1, 5))
valid_acc_list.append(prec1.data / 100)
if (i + 1) % 100 == 0:
logging.info('Valid arch %s\n loss %.2f top1 %f top5 %f', ' '.join(map(str, arch[0] + arch[1])), loss, prec1, prec5)
valid_accuracy_list = valid_acc_list
old_archs = child_arch_pool
old_archs_perf = valid_accuracy_list
old_archs_sorted_indices = np.argsort(old_archs_perf)[::-1]
old_archs = [old_archs[i] for i in old_archs_sorted_indices]
old_archs_perf = [old_archs_perf[i] for i in old_archs_sorted_indices]
with open(os.path.join(args.output_dir, 'arch_pool.{}'.format(epoch)), 'w') as fa:
with open(os.path.join(args.output_dir, 'arch_pool.perf.{}'.format(epoch)), 'w') as fp:
with open(os.path.join(args.output_dir, 'arch_pool'), 'w') as fa_latest:
with open(os.path.join(args.output_dir, 'arch_pool.perf'), 'w') as fp_latest:
for (arch, perf) in zip(old_archs, old_archs_perf):
arch = ' '.join(map(str, arch[0] + arch[1]))
fa.write('{}\n'.format(arch))
fa_latest.write('{}\n'.format(arch))
fp.write('{}\n'.format(perf))
fp_latest.write('{}\n'.format(perf))
if epoch == args.child_epochs:
break
logging.info('Training Encoder-Predictor-Decoder')
encoder_input = list(map(lambda x: utils.parse_arch_to_seq(x[0], 2) + utils.parse_arch_to_seq(x[1], 2), old_archs))
min_val = min(old_archs_perf)
max_val = max(old_archs_perf)
encoder_target = [(i - min_val) / (max_val - min_val) for i in old_archs_perf]
if args.controller_expand is not None:
dataset = list(zip(encoder_input, encoder_target))
n = len(dataset)
ratio = 0.9
split = int(n * ratio)
np.random.shuffle(dataset)
(encoder_input, encoder_target) = list(zip(*dataset))
train_encoder_input = list(encoder_input[:split])
train_encoder_target = list(encoder_target[:split])
valid_encoder_input = list(encoder_input[split:])
valid_encoder_target = list(encoder_target[split:])
for _ in range(args.controller_expand - 1):
for (src, tgt) in zip(encoder_input[:split], encoder_target[:split]):
a = np.random.randint(0, args.child_nodes)
b = np.random.randint(0, args.child_nodes)
src = src[:4 * a] + src[4 * a + 2:4 * a + 4] + src[4 * a:4 * a + 2] + src[4 * (a + 1):20 + 4 * b] + src[20 + 4 * b + 2:20 + 4 * b + 4] + src[20 + 4 * b:20 + 4 * b + 2] + src[20 + 4 * (b + 1):]
train_encoder_input.append(src)
train_encoder_target.append(tgt)
else:
train_encoder_input = encoder_input
train_encoder_target = encoder_target
valid_encoder_input = encoder_input
valid_encoder_target = encoder_target
logging.info('Train data: {}\tValid data: {}'.format(len(train_encoder_input), len(valid_encoder_input)))
nao_train_dataset = utils.NAODataset(train_encoder_input, train_encoder_target, True, swap=True if args.controller_expand is None else False)
nao_valid_dataset = utils.NAODataset(valid_encoder_input, valid_encoder_target, False)
nao_train_queue = torch.utils.data.DataLoader(nao_train_dataset, batch_size=args.controller_batch_size, shuffle=True, pin_memory=True)
nao_valid_queue = torch.utils.data.DataLoader(nao_valid_dataset, batch_size=args.controller_batch_size, shuffle=False, pin_memory=True)
nao_optimizer = torch.optim.Adam(nao.parameters(), lr=args.controller_lr, weight_decay=args.controller_l2_reg)
for nao_epoch in range(1, args.controller_epochs + 1):
objs = utils.AvgrageMeter()
mse = utils.AvgrageMeter()
nll = utils.AvgrageMeter()
nao.train()
for (step, sample) in enumerate(nao_train_queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_input = sample['decoder_input']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda().requires_grad_()
decoder_input = decoder_input.cuda()
decoder_target = decoder_target.cuda()
nao_optimizer.zero_grad()
(predict_value, log_prob, arch) = nao(encoder_input, decoder_input)
loss_1 = F.mse_loss(predict_value.squeeze(), encoder_target.squeeze())
loss_2 = F.nll_loss(log_prob.contiguous().view(-1, log_prob.size(-1)), decoder_target.view(-1))
loss = args.controller_trade_off * loss_1 + (1 - args.controller_trade_off) * loss_2
loss.backward()
torch.nn.utils.clip_grad_norm_(nao.parameters(), args.controller_grad_bound)
nao_optimizer.step()
n = encoder_input.size(0)
objs.update(loss.data, n)
mse.update(loss_1.data, n)
nll.update(loss_2.data, n)
(nao_loss, nao_mse, nao_ce) = (objs.avg, mse.avg, nll.avg)
logging.info('epoch %04d train loss %.6f mse %.6f ce %.6f', nao_epoch, nao_loss, nao_mse, nao_ce)
if nao_epoch % 100 == 0:
pa = utils.AvgrageMeter()
hs = utils.AvgrageMeter()
with torch.no_grad():
nao.eval()
for (step, sample) in enumerate(nao_valid_queue):
encoder_input = sample['encoder_input']
encoder_target = sample['encoder_target']
decoder_target = sample['decoder_target']
encoder_input = encoder_input.cuda()
encoder_target = encoder_target.cuda()
decoder_target = decoder_target.cuda()
(predict_value, logits, arch) = nao(encoder_input)
n = encoder_input.size(0)
pairwise_acc = utils.pairwise_accuracy(encoder_target.data.squeeze().tolist(), predict_value.data.squeeze().tolist())
hamming_dis = utils.hamming_distance(decoder_target.data.squeeze().tolist(), arch.data.squeeze().tolist())
pa.update(pairwise_acc, n)
hs.update(hamming_dis, n)
(pa, hs) = (pa.avg, hs.avg)
logging.info('Evaluation on valid data')
logging.info('epoch %04d pairwise accuracy %.6f hamming distance %.6f', epoch, pa, hs)
new_archs = []
max_step_size = 50
predict_step_size = 0
top100_archs = list(map(lambda x: utils.parse_arch_to_seq(x[0], 2) + utils.parse_arch_to_seq(x[1], 2), old_archs[:100]))
nao_infer_dataset = utils.NAODataset(top100_archs, None, False)
nao_infer_queue = torch.utils.data.DataLoader(nao_infer_dataset, batch_size=len(nao_infer_dataset), shuffle=False, pin_memory=True)
while len(new_archs) < args.controller_new_arch:
predict_step_size += 1
logging.info('Generate new architectures with step size %d', predict_step_size)
new_arch_list = []
nao.eval()
for (i, sample) in enumerate(nao_infer_queue):
encoder_input = sample['encoder_input']
encoder_input = encoder_input.cuda()
nao.zero_grad()
new_arch = nao.generate_new_arch(encoder_input, predict_step_size, direction='+')
new_arch_list.extend(new_arch.data.squeeze().tolist())
new_arch = new_arch_list
for arch in new_arch:
if arch not in encoder_input and arch not in new_archs:
new_archs.append(arch)
if len(new_archs) >= args.controller_new_arch:
break
logging.info('%d new archs generated now', len(new_archs))
if predict_step_size > max_step_size:
break
new_archs = list(map(lambda x: utils.parse_seq_to_arch(x, 2), new_archs))
num_new_archs = len(new_archs)
logging.info('Generate %d new archs', num_new_archs)
if args.controller_replace:
new_arch_pool = old_archs[:len(old_archs) - (num_new_archs + args.controller_random_arch)] + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
elif args.controller_discard:
new_arch_pool = old_archs[:100] + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
else:
new_arch_pool = old_archs + new_archs + utils.generate_arch(args.controller_random_arch, 5, 5)
logging.info('Totally %d architectures now to train', len(new_arch_pool))
child_arch_pool = new_arch_pool
with open(os.path.join(args.output_dir, 'arch_pool'), 'w') as f:
for arch in new_arch_pool:
arch = ' '.join(map(str, arch[0] + arch[1]))
f.write('{}\n'.format(arch))
if args.child_sample_policy == 'params':
child_arch_pool_prob = []
for arch in child_arch_pool:
if args.dataset == 'cifar10':
tmp_model = NASNetworkCIFAR(args, 10, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
elif args.dataset == 'cifar100':
tmp_model = NASNetworkCIFAR(args, 100, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
else:
tmp_model = NASNetworkImageNet(args, 1000, args.child_layers, args.child_nodes, args.child_channels, args.child_keep_prob, args.child_drop_path_keep_prob, args.child_use_aux_head, args.steps, arch)
child_arch_pool_prob.append(utils.count_parameters_in_MB(tmp_model))
del tmp_model
else:
child_arch_pool_prob = None
|
eval-nas
|
positive
|
def on_pause(self):
if self.state == GameState.paused:
<DeepExtract>
self.state = GameState.running
self.status_text.setText('Playing')
self.pause_button.setText('Pause')
self.timer.start(self.timer_interval)
</DeepExtract>
elif self.state == GameState.running:
<DeepExtract>
self.state = GameState.paused
self.status_text.setText('Paused')
self.pause_button.setText('Resume')
self.timer.stop()
</DeepExtract>
|
def on_pause(self):
if self.state == GameState.paused:
self.state = GameState.running
self.status_text.setText('Playing')
self.pause_button.setText('Pause')
self.timer.start(self.timer_interval)
elif self.state == GameState.running:
self.state = GameState.paused
self.status_text.setText('Paused')
self.pause_button.setText('Resume')
self.timer.stop()
|
code-for-blog
|
positive
|
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
return ver
except NotThisMethod:
pass
try:
<DeepExtract>
try:
with open(versionfile_abs) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod('unable to read _version.py')
mo = re.search("version_json = '''\\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
if not mo:
mo = re.search("version_json = '''\\r\\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
if not mo:
raise NotThisMethod('no version_json in _version.py')
ver = json.loads(mo.group(1))
</DeepExtract>
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
<DeepExtract>
if pieces['error']:
ver = {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
if not cfg.style or cfg.style == 'default':
cfg.style = 'pep440'
if cfg.style == 'pep440':
rendered = render_pep440(pieces)
elif cfg.style == 'pep440-pre':
rendered = render_pep440_pre(pieces)
elif cfg.style == 'pep440-post':
rendered = render_pep440_post(pieces)
elif cfg.style == 'pep440-old':
rendered = render_pep440_old(pieces)
elif cfg.style == 'git-describe':
rendered = render_git_describe(pieces)
elif cfg.style == 'git-describe-long':
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % cfg.style)
ver = {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
</DeepExtract>
if verbose:
print('got version from VCS %s' % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
<DeepExtract>
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(cfg.parentdir_prefix):
ver = {'version': dirname[len(cfg.parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None, 'date': None}
else:
rootdirs.append(root)
root = os.path.dirname(root)
if verbose:
print('Tried directories %s but none started with prefix %s' % (str(rootdirs), cfg.parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
</DeepExtract>
if verbose:
print('got version from parentdir %s' % ver)
return ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
return {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
|
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
return ver
except NotThisMethod:
pass
try:
try:
with open(versionfile_abs) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod('unable to read _version.py')
mo = re.search("version_json = '''\\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
if not mo:
mo = re.search("version_json = '''\\r\\n(.*)''' # END VERSION_JSON", contents, re.M | re.S)
if not mo:
raise NotThisMethod('no version_json in _version.py')
ver = json.loads(mo.group(1))
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
if pieces['error']:
ver = {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None}
if not cfg.style or cfg.style == 'default':
cfg.style = 'pep440'
if cfg.style == 'pep440':
rendered = render_pep440(pieces)
elif cfg.style == 'pep440-pre':
rendered = render_pep440_pre(pieces)
elif cfg.style == 'pep440-post':
rendered = render_pep440_post(pieces)
elif cfg.style == 'pep440-old':
rendered = render_pep440_old(pieces)
elif cfg.style == 'git-describe':
rendered = render_git_describe(pieces)
elif cfg.style == 'git-describe-long':
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % cfg.style)
ver = {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')}
if verbose:
print('got version from VCS %s' % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(cfg.parentdir_prefix):
ver = {'version': dirname[len(cfg.parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None, 'date': None}
else:
rootdirs.append(root)
root = os.path.dirname(root)
if verbose:
print('Tried directories %s but none started with prefix %s' % (str(rootdirs), cfg.parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
if verbose:
print('got version from parentdir %s' % ver)
return ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
return {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
|
cusignal
|
positive
|
def __init__(self, group: Group, in_irrep: Union[str, IrreducibleRepresentation, Tuple[int]], out_irrep: Union[str, IrreducibleRepresentation, Tuple[int, int]], axis: float=0.0, discretization: DiscretizationArgs=DiscretizationArgs()):
assert isinstance(group, O2)
assert isinstance(axis, float)
self.axis = axis
if isinstance(in_irrep, tuple):
in_irrep = group.irrep(in_irrep[0], in_irrep[1])
elif isinstance(in_irrep, str):
in_irrep = group.irreps[in_irrep]
elif not isinstance(in_irrep, IrreducibleRepresentation):
raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance of IrreducibleRepresentation but {in_irrep} found")
if isinstance(out_irrep, tuple):
out_irrep = group.irrep(out_irrep[0], out_irrep[1])
elif isinstance(out_irrep, str):
out_irrep = group.irreps[out_irrep]
elif not isinstance(out_irrep, IrreducibleRepresentation):
raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance of IrreducibleRepresentation but {in_irrep} found")
self.m = out_irrep.attributes['frequency']
self.n = in_irrep.attributes['frequency']
self.fi = in_irrep.attributes['flip_frequency']
self.fo = out_irrep.attributes['flip_frequency']
self.mu = []
if in_irrep.size == 2 and out_irrep.size == 2:
assert self.m > 0 and self.n > 0 and (self.fi == 1) and (self.fo == 1)
self.s = []
self.invert = 0
for s in [0, 1]:
mu = self.m - self.n * (-1) ** s
self.mu.append(mu)
self.s.append(s)
elif in_irrep.size == 2 and out_irrep.size == 1:
assert self.m == 0 and self.fi == 1
self.invert = self.fo
mu = self.n + self.m
self.mu.append(mu)
elif in_irrep.size == 1 and out_irrep.size == 2:
assert self.n == 0 and self.fo == 1
self.invert = self.fi
mu = self.n + self.m
self.mu.append(mu)
elif in_irrep.size == 1 and out_irrep.size == 1:
assert self.n == 0 and self.m == 0
self.invert = (self.fi + self.fo) % 2
mu = self.m - self.n
if mu > 0 or self.invert == 0:
self.mu.append(mu)
self.dim = len(self.mu)
self.group = group
self.in_irrep = in_irrep
self.out_irrep = out_irrep
self.shape = (out_irrep.size, in_irrep.size)
coefficients = []
if self.shape[0] == 2 and self.shape[1] == 2:
for i in range(self.dim):
s = self.s[i]
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
<DeepExtract>
inverter = {'u': 't', 't': 'u'}
if 't' == 't' and self.invert:
sign = -1
else:
sign = 1
out[0, 0, :] = sign * homogenized_cheby(mu, inverter['t'] if self.invert else 't')
</DeepExtract>
out[0, 1, :] = -(-1) ** s * cheby('u', mu, self.invert)
<DeepExtract>
inverter = {'u': 't', 't': 'u'}
if 'u' == 't' and self.invert:
sign = -1
else:
sign = 1
out[1, 0, :] = sign * homogenized_cheby(mu, inverter['u'] if self.invert else 'u')
</DeepExtract>
out[1, 1, :] = (-1) ** s * cheby('t', mu, self.invert)
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 2:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
out[0, 0, :] = (-1) ** self.invert * homogenized_cheby(mu, 'u' if self.invert else 't')
out[0, 1, :] = homogenized_cheby(mu, 't' if self.invert else 'u')
coefficients.append(out)
elif self.shape[0] == 2 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
out[0, 0, :] = (-1) ** self.invert * homogenized_cheby(mu, 'u' if self.invert else 't')
out[1, 0, :] = homogenized_cheby(mu, 't' if self.invert else 'u')
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = homogenized_cheby(mu, 'u' if self.invert else 't').reshape(1, 1, -1)
coefficients.append(out)
else:
raise ValueError(f'Shape {self.shape} not recognized!')
if axis != 0:
so2 = SO2(1)
matrix = so2.irrep(1)(axis)
coefficients = [transform_polynomial(element, matrix) for element in coefficients]
super().__init__(coefficients, discretization)
|
def __init__(self, group: Group, in_irrep: Union[str, IrreducibleRepresentation, Tuple[int]], out_irrep: Union[str, IrreducibleRepresentation, Tuple[int, int]], axis: float=0.0, discretization: DiscretizationArgs=DiscretizationArgs()):
assert isinstance(group, O2)
assert isinstance(axis, float)
self.axis = axis
if isinstance(in_irrep, tuple):
in_irrep = group.irrep(in_irrep[0], in_irrep[1])
elif isinstance(in_irrep, str):
in_irrep = group.irreps[in_irrep]
elif not isinstance(in_irrep, IrreducibleRepresentation):
raise ValueError(f"'in_irrep' should be a non-negative integer, a string or an instance of IrreducibleRepresentation but {in_irrep} found")
if isinstance(out_irrep, tuple):
out_irrep = group.irrep(out_irrep[0], out_irrep[1])
elif isinstance(out_irrep, str):
out_irrep = group.irreps[out_irrep]
elif not isinstance(out_irrep, IrreducibleRepresentation):
raise ValueError(f"'out_irrep' should be a non-negative integer, a string or an instance of IrreducibleRepresentation but {in_irrep} found")
self.m = out_irrep.attributes['frequency']
self.n = in_irrep.attributes['frequency']
self.fi = in_irrep.attributes['flip_frequency']
self.fo = out_irrep.attributes['flip_frequency']
self.mu = []
if in_irrep.size == 2 and out_irrep.size == 2:
assert self.m > 0 and self.n > 0 and (self.fi == 1) and (self.fo == 1)
self.s = []
self.invert = 0
for s in [0, 1]:
mu = self.m - self.n * (-1) ** s
self.mu.append(mu)
self.s.append(s)
elif in_irrep.size == 2 and out_irrep.size == 1:
assert self.m == 0 and self.fi == 1
self.invert = self.fo
mu = self.n + self.m
self.mu.append(mu)
elif in_irrep.size == 1 and out_irrep.size == 2:
assert self.n == 0 and self.fo == 1
self.invert = self.fi
mu = self.n + self.m
self.mu.append(mu)
elif in_irrep.size == 1 and out_irrep.size == 1:
assert self.n == 0 and self.m == 0
self.invert = (self.fi + self.fo) % 2
mu = self.m - self.n
if mu > 0 or self.invert == 0:
self.mu.append(mu)
self.dim = len(self.mu)
self.group = group
self.in_irrep = in_irrep
self.out_irrep = out_irrep
self.shape = (out_irrep.size, in_irrep.size)
coefficients = []
if self.shape[0] == 2 and self.shape[1] == 2:
for i in range(self.dim):
s = self.s[i]
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
inverter = {'u': 't', 't': 'u'}
if 't' == 't' and self.invert:
sign = -1
else:
sign = 1
out[0, 0, :] = sign * homogenized_cheby(mu, inverter['t'] if self.invert else 't')
out[0, 1, :] = -(-1) ** s * cheby('u', mu, self.invert)
inverter = {'u': 't', 't': 'u'}
if 'u' == 't' and self.invert:
sign = -1
else:
sign = 1
out[1, 0, :] = sign * homogenized_cheby(mu, inverter['u'] if self.invert else 'u')
out[1, 1, :] = (-1) ** s * cheby('t', mu, self.invert)
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 2:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
out[0, 0, :] = (-1) ** self.invert * homogenized_cheby(mu, 'u' if self.invert else 't')
out[0, 1, :] = homogenized_cheby(mu, 't' if self.invert else 'u')
coefficients.append(out)
elif self.shape[0] == 2 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = np.empty(self.shape + (abs(mu) + 1,))
out[0, 0, :] = (-1) ** self.invert * homogenized_cheby(mu, 'u' if self.invert else 't')
out[1, 0, :] = homogenized_cheby(mu, 't' if self.invert else 'u')
coefficients.append(out)
elif self.shape[0] == 1 and self.shape[1] == 1:
for i in range(self.dim):
mu = self.mu[i]
out = homogenized_cheby(mu, 'u' if self.invert else 't').reshape(1, 1, -1)
coefficients.append(out)
else:
raise ValueError(f'Shape {self.shape} not recognized!')
if axis != 0:
so2 = SO2(1)
matrix = so2.irrep(1)(axis)
coefficients = [transform_polynomial(element, matrix) for element in coefficients]
super().__init__(coefficients, discretization)
|
e2cnn
|
positive
|
def UTCUsecToWeek(seconds, day_of_week):
"""Converts UTC seconds to last <day_of_week>.
Args:
seconds: Date and time in UTC seconds.
day_of_week: Last <day_of_week> to go back to. Sunday is 0.
Returns:
UTC seconds of last <day_of_week>.
"""
seconds = float(seconds) / 1000000
<DeepExtract>
try:
if utc:
date = datetime.datetime.utcfromtimestamp(seconds + _TIME_DIFFERENCE_UTC_PST)
else:
date = datetime.datetime.fromtimestamp(seconds + _TIME_DIFFERENCE_UTC_PST)
except ValueError as e:
raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None)
</DeepExtract>
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
current_day_of_week = (date.weekday() + 1) % 7
if current_day_of_week >= day_of_week:
date += datetime.timedelta(days=day_of_week - current_day_of_week)
else:
days_back = 7 - (day_of_week - current_day_of_week)
date += datetime.timedelta(days=-days_back)
return (time.mktime(date.timetuple()) - _TIME_DIFFERENCE_UTC_PST) * 1000000
|
def UTCUsecToWeek(seconds, day_of_week):
"""Converts UTC seconds to last <day_of_week>.
Args:
seconds: Date and time in UTC seconds.
day_of_week: Last <day_of_week> to go back to. Sunday is 0.
Returns:
UTC seconds of last <day_of_week>.
"""
seconds = float(seconds) / 1000000
try:
if utc:
date = datetime.datetime.utcfromtimestamp(seconds + _TIME_DIFFERENCE_UTC_PST)
else:
date = datetime.datetime.fromtimestamp(seconds + _TIME_DIFFERENCE_UTC_PST)
except ValueError as e:
raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None)
date = date.replace(hour=0, minute=0, second=0, microsecond=0)
current_day_of_week = (date.weekday() + 1) % 7
if current_day_of_week >= day_of_week:
date += datetime.timedelta(days=day_of_week - current_day_of_week)
else:
days_back = 7 - (day_of_week - current_day_of_week)
date += datetime.timedelta(days=-days_back)
return (time.mktime(date.timetuple()) - _TIME_DIFFERENCE_UTC_PST) * 1000000
|
encrypted-bigquery-client
|
positive
|
@recipe_folder_and_config(allow_missing_for=['list_checks'])
@arg('--packages', nargs='+', help='Glob for package[s] to build. Default is to build all packages. Can be specified more than once')
@arg('--cache', help='To speed up debugging, use repodata cached locally in\n the provided filename. If the file does not exist, it will be created the\n first time.')
@arg('--list-checks', help='List the linting functions to be used and then\n exit')
@arg('--exclude', nargs='+', help='Exclude this linting function. Can be used\n multiple times.')
@arg('--push-status', action='store_true', help='If set, the lint status will\n be sent to the current commit on github. Also needs --user and --repo to\n be set. Requires the env var GITHUB_TOKEN to be set. Note that pull\n requests from forks will not have access to encrypted variables on\n ci, so this feature may be of limited use.')
@arg('--commit', help='Commit on github on which to update status')
@arg('--push-comment', action='store_true', help='If set, the lint status\n will be posted as a comment in the corresponding pull request (given by\n --pull-request). Also needs --user and --repo to be set. Requires the env\n var GITHUB_TOKEN to be set.')
@arg('--pull-request', type=int, help='Pull request id on github on which to\n post a comment.')
@arg('--user', help='Github user')
@arg('--repo', help='Github repo')
@arg('--git-range', nargs='+', help='Git range (e.g. commits or something like\n "master HEAD" to check commits in HEAD vs master, or just "HEAD" to\n include uncommitted changes). All recipes modified within this range will\n be built if not present in the channel.')
@arg('--full-report', action='store_true', help='Default behavior is to\n summarize the linting results; use this argument to get the full\n results as a TSV printed to stdout.')
@arg('--try-fix', help='Attempt to fix problems where found')
@enable_logging()
@enable_debugging()
@named('lint')
def do_lint(recipe_folder, config, packages='*', cache=None, list_checks=False, exclude=None, push_status=False, user='bioconda', commit=None, push_comment=False, pull_request=None, repo='bioconda-recipes', git_range=None, full_report=False, try_fix=False):
"""
Lint recipes
If --push-status is not set, reports a TSV of linting results to stdout.
Otherwise pushes a commit status to the specified commit on github.
"""
if list_checks:
print('\n'.join((str(check) for check in lint.get_checks())))
sys.exit(0)
config = utils.load_config(config)
if cache is not None:
utils.RepoData().set_cache(cache)
<DeepExtract>
recipes = list(utils.get_recipes(recipe_folder, packages))
logger.info('Considering total of %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
if git_range:
changed_recipes = get_recipes_to_build(git_range, recipe_folder)
logger.info('Constraining to %s git modified recipes%s.', len(changed_recipes), utils.ellipsize_recipes(changed_recipes, recipe_folder))
recipes = [recipe for recipe in recipes if recipe in set(changed_recipes)]
if len(recipes) != len(changed_recipes):
logger.info('Overlap was %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
if not True:
blacklist = utils.get_blacklist(config, recipe_folder)
blacklisted = []
for recipe in recipes:
if os.path.relpath(recipe, recipe_folder) in blacklist:
blacklisted.append(recipe)
if blacklisted:
logger.info('Ignoring %s blacklisted recipes%s.', len(blacklisted), utils.ellipsize_recipes(blacklisted, recipe_folder))
recipes = [recipe for recipe in recipes if recipe not in set(blacklisted)]
logger.info('Processing %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
recipes = recipes
</DeepExtract>
linter = lint.Linter(config, recipe_folder, exclude)
result = linter.lint(recipes, fix=try_fix)
messages = linter.get_messages()
if messages:
print('The following problems have been found:\n')
print(linter.get_report())
if not result:
print('All checks OK')
else:
sys.exit('Errors were found')
|
@recipe_folder_and_config(allow_missing_for=['list_checks'])
@arg('--packages', nargs='+', help='Glob for package[s] to build. Default is to build all packages. Can be specified more than once')
@arg('--cache', help='To speed up debugging, use repodata cached locally in\n the provided filename. If the file does not exist, it will be created the\n first time.')
@arg('--list-checks', help='List the linting functions to be used and then\n exit')
@arg('--exclude', nargs='+', help='Exclude this linting function. Can be used\n multiple times.')
@arg('--push-status', action='store_true', help='If set, the lint status will\n be sent to the current commit on github. Also needs --user and --repo to\n be set. Requires the env var GITHUB_TOKEN to be set. Note that pull\n requests from forks will not have access to encrypted variables on\n ci, so this feature may be of limited use.')
@arg('--commit', help='Commit on github on which to update status')
@arg('--push-comment', action='store_true', help='If set, the lint status\n will be posted as a comment in the corresponding pull request (given by\n --pull-request). Also needs --user and --repo to be set. Requires the env\n var GITHUB_TOKEN to be set.')
@arg('--pull-request', type=int, help='Pull request id on github on which to\n post a comment.')
@arg('--user', help='Github user')
@arg('--repo', help='Github repo')
@arg('--git-range', nargs='+', help='Git range (e.g. commits or something like\n "master HEAD" to check commits in HEAD vs master, or just "HEAD" to\n include uncommitted changes). All recipes modified within this range will\n be built if not present in the channel.')
@arg('--full-report', action='store_true', help='Default behavior is to\n summarize the linting results; use this argument to get the full\n results as a TSV printed to stdout.')
@arg('--try-fix', help='Attempt to fix problems where found')
@enable_logging()
@enable_debugging()
@named('lint')
def do_lint(recipe_folder, config, packages='*', cache=None, list_checks=False, exclude=None, push_status=False, user='bioconda', commit=None, push_comment=False, pull_request=None, repo='bioconda-recipes', git_range=None, full_report=False, try_fix=False):
"""
Lint recipes
If --push-status is not set, reports a TSV of linting results to stdout.
Otherwise pushes a commit status to the specified commit on github.
"""
if list_checks:
print('\n'.join((str(check) for check in lint.get_checks())))
sys.exit(0)
config = utils.load_config(config)
if cache is not None:
utils.RepoData().set_cache(cache)
recipes = list(utils.get_recipes(recipe_folder, packages))
logger.info('Considering total of %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
if git_range:
changed_recipes = get_recipes_to_build(git_range, recipe_folder)
logger.info('Constraining to %s git modified recipes%s.', len(changed_recipes), utils.ellipsize_recipes(changed_recipes, recipe_folder))
recipes = [recipe for recipe in recipes if recipe in set(changed_recipes)]
if len(recipes) != len(changed_recipes):
logger.info('Overlap was %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
if not True:
blacklist = utils.get_blacklist(config, recipe_folder)
blacklisted = []
for recipe in recipes:
if os.path.relpath(recipe, recipe_folder) in blacklist:
blacklisted.append(recipe)
if blacklisted:
logger.info('Ignoring %s blacklisted recipes%s.', len(blacklisted), utils.ellipsize_recipes(blacklisted, recipe_folder))
recipes = [recipe for recipe in recipes if recipe not in set(blacklisted)]
logger.info('Processing %s recipes%s.', len(recipes), utils.ellipsize_recipes(recipes, recipe_folder))
recipes = recipes
linter = lint.Linter(config, recipe_folder, exclude)
result = linter.lint(recipes, fix=try_fix)
messages = linter.get_messages()
if messages:
print('The following problems have been found:\n')
print(linter.get_report())
if not result:
print('All checks OK')
else:
sys.exit('Errors were found')
|
bioconda-utils
|
positive
|
def _aipscc(self, threshold=None, relative=True, istokes=0, ifreq=0):
"""
Make AIPS CC table
Arguments:
istokes (integer): index for Stokes Parameter at which the image will be saved
ifreq (integer): index for Frequency at which the image will be saved
threshold (float): pixels with the absolute intensity smaller than this value will be ignored.
relative (boolean): If true, theshold value will be normalized with the peak intensity of the image.
"""
Nx = self.header['nx']
Ny = self.header['ny']
<DeepExtract>
if 'deg' is None:
'deg' = self.angunit
dx = self.header['dx']
dy = self.header['dy']
Nx = self.header['nx']
Ny = self.header['ny']
Nxref = self.header['nxref']
Nyref = self.header['nyref']
xg = dx * (np.arange(Nx) - Nxref + 1) * self.angconv('deg', 'deg')
yg = dy * (np.arange(Ny) - Nyref + 1) * self.angconv('deg', 'deg')
if twodim:
(xg, yg) = np.meshgrid(xg, yg)
(xg, yg) = (xg, yg)
</DeepExtract>
(X, Y) = np.meshgrid(xg, yg)
X = X.reshape(Nx * Ny)
Y = Y.reshape(Nx * Ny)
flux = self.data[istokes, ifreq]
flux = flux.reshape(Nx * Ny)
if threshold is None:
thres = np.finfo(np.float32).eps
elif relative:
thres = self.peak(istokes=istokes, ifreq=ifreq) * threshold
else:
thres = threshold
thres = np.abs(thres)
X = X[flux >= thres]
Y = Y[flux >= thres]
flux = flux[flux >= thres]
c1 = pyfits.Column(name='FLUX', array=flux, format='1E', unit='JY')
c2 = pyfits.Column(name='DELTAX', array=X, format='1E', unit='DEGREES')
c3 = pyfits.Column(name='DELTAY', array=Y, format='1E', unit='DEGREES')
c4 = pyfits.Column(name='MAJOR AX', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c5 = pyfits.Column(name='MINOR AX', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c6 = pyfits.Column(name='POSANGLE', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c7 = pyfits.Column(name='TYPE OBJ', array=np.zeros(len(flux)), format='1E', unit='CODE')
tab = pyfits.BinTableHDU.from_columns([c1, c2, c3, c4, c5, c6, c7])
return tab
|
def _aipscc(self, threshold=None, relative=True, istokes=0, ifreq=0):
"""
Make AIPS CC table
Arguments:
istokes (integer): index for Stokes Parameter at which the image will be saved
ifreq (integer): index for Frequency at which the image will be saved
threshold (float): pixels with the absolute intensity smaller than this value will be ignored.
relative (boolean): If true, theshold value will be normalized with the peak intensity of the image.
"""
Nx = self.header['nx']
Ny = self.header['ny']
if 'deg' is None:
'deg' = self.angunit
dx = self.header['dx']
dy = self.header['dy']
Nx = self.header['nx']
Ny = self.header['ny']
Nxref = self.header['nxref']
Nyref = self.header['nyref']
xg = dx * (np.arange(Nx) - Nxref + 1) * self.angconv('deg', 'deg')
yg = dy * (np.arange(Ny) - Nyref + 1) * self.angconv('deg', 'deg')
if twodim:
(xg, yg) = np.meshgrid(xg, yg)
(xg, yg) = (xg, yg)
(X, Y) = np.meshgrid(xg, yg)
X = X.reshape(Nx * Ny)
Y = Y.reshape(Nx * Ny)
flux = self.data[istokes, ifreq]
flux = flux.reshape(Nx * Ny)
if threshold is None:
thres = np.finfo(np.float32).eps
elif relative:
thres = self.peak(istokes=istokes, ifreq=ifreq) * threshold
else:
thres = threshold
thres = np.abs(thres)
X = X[flux >= thres]
Y = Y[flux >= thres]
flux = flux[flux >= thres]
c1 = pyfits.Column(name='FLUX', array=flux, format='1E', unit='JY')
c2 = pyfits.Column(name='DELTAX', array=X, format='1E', unit='DEGREES')
c3 = pyfits.Column(name='DELTAY', array=Y, format='1E', unit='DEGREES')
c4 = pyfits.Column(name='MAJOR AX', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c5 = pyfits.Column(name='MINOR AX', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c6 = pyfits.Column(name='POSANGLE', array=np.zeros(len(flux)), format='1E', unit='DEGREES')
c7 = pyfits.Column(name='TYPE OBJ', array=np.zeros(len(flux)), format='1E', unit='CODE')
tab = pyfits.BinTableHDU.from_columns([c1, c2, c3, c4, c5, c6, c7])
return tab
|
eat
|
positive
|
def main(argv):
del argv
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
<DeepExtract>
def _load_env():
raw_env = bsuite.load_and_record(bsuite_id=bsuite_id, save_path=FLAGS.save_path, logging_mode=FLAGS.logging_mode, overwrite=FLAGS.overwrite)
if FLAGS.verbose:
raw_env = terminal_logging.wrap_environment(raw_env, log_every=True)
return gym_wrapper.GymFromDMEnv(raw_env)
env = dummy_vec_env.DummyVecEnv([_load_env])
ppo2.learn(env=env, network=FLAGS.network, lr=FLAGS.learning_rate, total_timesteps=FLAGS.total_timesteps, nsteps=FLAGS.nsteps, gamma=FLAGS.agent_discount)
return bsuite_id
</DeepExtract>
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
|
def main(argv):
del argv
bsuite_id = FLAGS.bsuite_id
if bsuite_id in sweep.SWEEP:
print(f'Running single experiment: bsuite_id={bsuite_id}.')
def _load_env():
raw_env = bsuite.load_and_record(bsuite_id=bsuite_id, save_path=FLAGS.save_path, logging_mode=FLAGS.logging_mode, overwrite=FLAGS.overwrite)
if FLAGS.verbose:
raw_env = terminal_logging.wrap_environment(raw_env, log_every=True)
return gym_wrapper.GymFromDMEnv(raw_env)
env = dummy_vec_env.DummyVecEnv([_load_env])
ppo2.learn(env=env, network=FLAGS.network, lr=FLAGS.learning_rate, total_timesteps=FLAGS.total_timesteps, nsteps=FLAGS.nsteps, gamma=FLAGS.agent_discount)
return bsuite_id
elif hasattr(sweep, bsuite_id):
bsuite_sweep = getattr(sweep, bsuite_id)
print(f'Running sweep over bsuite_id in sweep.{bsuite_sweep}')
FLAGS.verbose = False
pool.map_mpi(run, bsuite_sweep)
else:
raise ValueError(f'Invalid flag: bsuite_id={bsuite_id}.')
|
bsuite
|
positive
|
def call_method(self, method, *args, **kwargs):
"""Call a method."""
if len(self._method_calls) > 5:
raise RuntimeError('recursion level too deep')
message = jsonrpc.create_request(method, args)
<DeepExtract>
if self._socket is None:
raise RuntimeError('not connected')
if not jsonrpc.check_message(message):
raise ValueError('invalid JSON-RPC message')
self._outgoing.append(message)
if not self._write_notifier.isEnabled():
self._write_notifier.setEnabled(True)
</DeepExtract>
replies = []
def method_response(message, client):
replies.append(message)
def method_timeout():
reply = jsonrpc.create_error(message, jsonrpc.SERVER_ERROR, 'Method call timed out')
replies.append(reply)
timeout = kwargs.pop('timeout', self.timeout)
if timeout:
timer = QTimer(self)
timer.setInterval(timeout * 1000)
timer.setSingleShot(True)
timer.timeout.connect(method_timeout)
timer.start()
self._method_calls[message['id']] = method_response
loop = QEventLoop()
mask = QEventLoop.ExcludeUserInputEvents | QEventLoop.WaitForMoreEvents
while True:
loop.processEvents(mask)
if replies:
break
if timeout:
timer.stop()
reply = replies[0]
del self._method_calls[message['id']]
if reply.get('error'):
raise QJsonRpcError(reply['error'])
self.message = reply
return reply.get('result')
|
def call_method(self, method, *args, **kwargs):
"""Call a method."""
if len(self._method_calls) > 5:
raise RuntimeError('recursion level too deep')
message = jsonrpc.create_request(method, args)
if self._socket is None:
raise RuntimeError('not connected')
if not jsonrpc.check_message(message):
raise ValueError('invalid JSON-RPC message')
self._outgoing.append(message)
if not self._write_notifier.isEnabled():
self._write_notifier.setEnabled(True)
replies = []
def method_response(message, client):
replies.append(message)
def method_timeout():
reply = jsonrpc.create_error(message, jsonrpc.SERVER_ERROR, 'Method call timed out')
replies.append(reply)
timeout = kwargs.pop('timeout', self.timeout)
if timeout:
timer = QTimer(self)
timer.setInterval(timeout * 1000)
timer.setSingleShot(True)
timer.timeout.connect(method_timeout)
timer.start()
self._method_calls[message['id']] = method_response
loop = QEventLoop()
mask = QEventLoop.ExcludeUserInputEvents | QEventLoop.WaitForMoreEvents
while True:
loop.processEvents(mask)
if replies:
break
if timeout:
timer.stop()
reply = replies[0]
del self._method_calls[message['id']]
if reply.get('error'):
raise QJsonRpcError(reply['error'])
self.message = reply
return reply.get('result')
|
bluepass
|
positive
|
def __call__(self, img, points=None, labels=None):
if isinstance(img, list):
for i in range(len(img)):
if random.random() < self.p_h:
<DeepExtract>
img[i] = F.hflip(img[i])
if points[i] is not None and len(labels[i]):
points[i][:, 0] = img[i].size[0] - points[i][:, 0]
(img[i][i], points[i][i], labels[i][i]) = (img[i], points[i], labels[i])
</DeepExtract>
if random.random() < self.p_v:
<DeepExtract>
img[i] = F.vflip(img[i])
if points[i] is not None and len(labels[i]):
points[i][:, 1] = img[i].size[1] - points[i][:, 1]
(img[i][i], points[i][i], labels[i][i]) = (img[i], points[i], labels[i])
</DeepExtract>
else:
if random.random() < self.p_h:
<DeepExtract>
img = F.hflip(img)
if points is not None and len(labels):
points[:, 0] = img.size[0] - points[:, 0]
(img, points, labels) = (img, points, labels)
</DeepExtract>
if random.random() < self.p_v:
<DeepExtract>
img = F.vflip(img)
if points is not None and len(labels):
points[:, 1] = img.size[1] - points[:, 1]
(img, points, labels) = (img, points, labels)
</DeepExtract>
return (img, points, labels)
|
def __call__(self, img, points=None, labels=None):
if isinstance(img, list):
for i in range(len(img)):
if random.random() < self.p_h:
img[i] = F.hflip(img[i])
if points[i] is not None and len(labels[i]):
points[i][:, 0] = img[i].size[0] - points[i][:, 0]
(img[i][i], points[i][i], labels[i][i]) = (img[i], points[i], labels[i])
if random.random() < self.p_v:
img[i] = F.vflip(img[i])
if points[i] is not None and len(labels[i]):
points[i][:, 1] = img[i].size[1] - points[i][:, 1]
(img[i][i], points[i][i], labels[i][i]) = (img[i], points[i], labels[i])
else:
if random.random() < self.p_h:
img = F.hflip(img)
if points is not None and len(labels):
points[:, 0] = img.size[0] - points[:, 0]
(img, points, labels) = (img, points, labels)
if random.random() < self.p_v:
img = F.vflip(img)
if points is not None and len(labels):
points[:, 1] = img.size[1] - points[:, 1]
(img, points, labels) = (img, points, labels)
return (img, points, labels)
|
aerial_wildlife_detection
|
positive
|
def get_server_info(self):
"""Gets a ServerInfo and sends os_info + client version to server"""
os_info = OSInfo.capture()
from biicode.common import __version__
data = (os_info, str(__version__))
serialized_data = Serializer().build(('data', data))
<DeepExtract>
logger.debug('JWT Call %s' % str('get_server_info'))
auth = JWTAuth(self.token) if self.token else None
headers = headers or {}
headers.update(self.custom_headers)
headers['Content-Type'] = 'application/bson'
if serialized_data is not None:
serialized_data = str(encode_bson(serialized_data))
info = self.call('get_server_info', url_params=url_params, data=serialized_data, headers=headers, auth=auth, deserializer=ServerInfo, response=response, timeout=1)
</DeepExtract>
return info
|
def get_server_info(self):
"""Gets a ServerInfo and sends os_info + client version to server"""
os_info = OSInfo.capture()
from biicode.common import __version__
data = (os_info, str(__version__))
serialized_data = Serializer().build(('data', data))
logger.debug('JWT Call %s' % str('get_server_info'))
auth = JWTAuth(self.token) if self.token else None
headers = headers or {}
headers.update(self.custom_headers)
headers['Content-Type'] = 'application/bson'
if serialized_data is not None:
serialized_data = str(encode_bson(serialized_data))
info = self.call('get_server_info', url_params=url_params, data=serialized_data, headers=headers, auth=auth, deserializer=ServerInfo, response=response, timeout=1)
return info
|
client
|
positive
|
def traverse_polyline_by_specific_dist(polyline_to_walk: np.ndarray, l2_dist_to_walk: float) -> Tuple[np.ndarray, bool]:
"""
Walk a distance along a polyline, and return the points along which you walked.
Assumption: polyline is much longer than the distance to walk.
Args:
polyline_to_walk: Numpy array of shape (N,2)
l2_dist_to_walk: Distance to traverse
Returns:
Tuple of (polyline, success flag)
"""
MAX_NUM_PTS_TO_WALK = 100
dense_polyline_to_walk = interp_arc(MAX_NUM_PTS_TO_WALK, polyline_to_walk[:, 0], polyline_to_walk[:, 1])
for i in range(MAX_NUM_PTS_TO_WALK):
<DeepExtract>
assert dense_polyline_to_walk[:i].shape[1] == 2
l2 = float(np.linalg.norm(np.diff(dense_polyline_to_walk[:i], axis=0), axis=1).sum())
</DeepExtract>
if l2 > l2_dist_to_walk:
return (dense_polyline_to_walk[:i], True)
return (dense_polyline_to_walk, False)
|
def traverse_polyline_by_specific_dist(polyline_to_walk: np.ndarray, l2_dist_to_walk: float) -> Tuple[np.ndarray, bool]:
"""
Walk a distance along a polyline, and return the points along which you walked.
Assumption: polyline is much longer than the distance to walk.
Args:
polyline_to_walk: Numpy array of shape (N,2)
l2_dist_to_walk: Distance to traverse
Returns:
Tuple of (polyline, success flag)
"""
MAX_NUM_PTS_TO_WALK = 100
dense_polyline_to_walk = interp_arc(MAX_NUM_PTS_TO_WALK, polyline_to_walk[:, 0], polyline_to_walk[:, 1])
for i in range(MAX_NUM_PTS_TO_WALK):
assert dense_polyline_to_walk[:i].shape[1] == 2
l2 = float(np.linalg.norm(np.diff(dense_polyline_to_walk[:i], axis=0), axis=1).sum())
if l2 > l2_dist_to_walk:
return (dense_polyline_to_walk[:i], True)
return (dense_polyline_to_walk, False)
|
argoverse-api
|
positive
|
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
<DeepExtract>
if len(token[1][2:]) % 2 == 1:
token[1][2:] = '0' + token[1][2:]
try:
bytes = binascii.a2b_hex(token[1][2:])
except:
bytes = None
</DeepExtract>
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 256 + C2IIP2(byte)
return integer
|
def InterpretHexInteger(token):
if token[0] != STATE_IDENTIFIER:
return None
if not token[1].startswith('0x'):
return None
if len(token[1][2:]) % 2 == 1:
token[1][2:] = '0' + token[1][2:]
try:
bytes = binascii.a2b_hex(token[1][2:])
except:
bytes = None
if bytes == None:
return None
integer = 0
for byte in bytes:
integer = integer * 256 + C2IIP2(byte)
return integer
|
Beta
|
positive
|
def test_restrict_access_audit_board_wrong_org(client: FlaskClient, election_id: str, jurisdiction_id: str, round_id: str, audit_board_id: str):
(org_id_2, _) = create_org_and_admin('Org 3', 'aa3@example.com')
set_logged_in_user(client, UserType.AUDIT_ADMIN, 'aa3@example.com')
election_id_2 = create_election(client, organization_id=org_id_2)
(jurisdiction_id_2, _) = create_jurisdiction_and_admin(election_id_2, 'Test Jurisdiction', 'ja3@example.com')
<DeepExtract>
round = Round(id=str(uuid.uuid4()), election_id=election_id_2, round_num=round_num)
db_session.add(round)
db_session.commit()
round_id_2 = str(round.id)
</DeepExtract>
<DeepExtract>
audit_board_id = str(uuid.uuid4())
audit_board = AuditBoard(id=audit_board_id, jurisdiction_id=jurisdiction_id_2, round_id=round_id_2, passphrase=f'passphrase-{audit_board_id}')
db_session.add(audit_board)
db_session.commit()
audit_board_id_2 = str(audit_board.id)
</DeepExtract>
set_logged_in_user(client, UserType.AUDIT_BOARD, audit_board_id_2)
rv = client.get(f'/api/election/{election_id}/jurisdiction/{jurisdiction_id}/round/{round_id}/audit-board/{audit_board_id}/test_auth')
assert rv.status_code == 403
assert json.loads(rv.data) == {'errors': [{'errorType': 'Forbidden', 'message': f'User does not have access to audit board {audit_board_id}'}]}
|
def test_restrict_access_audit_board_wrong_org(client: FlaskClient, election_id: str, jurisdiction_id: str, round_id: str, audit_board_id: str):
(org_id_2, _) = create_org_and_admin('Org 3', 'aa3@example.com')
set_logged_in_user(client, UserType.AUDIT_ADMIN, 'aa3@example.com')
election_id_2 = create_election(client, organization_id=org_id_2)
(jurisdiction_id_2, _) = create_jurisdiction_and_admin(election_id_2, 'Test Jurisdiction', 'ja3@example.com')
round = Round(id=str(uuid.uuid4()), election_id=election_id_2, round_num=round_num)
db_session.add(round)
db_session.commit()
round_id_2 = str(round.id)
audit_board_id = str(uuid.uuid4())
audit_board = AuditBoard(id=audit_board_id, jurisdiction_id=jurisdiction_id_2, round_id=round_id_2, passphrase=f'passphrase-{audit_board_id}')
db_session.add(audit_board)
db_session.commit()
audit_board_id_2 = str(audit_board.id)
set_logged_in_user(client, UserType.AUDIT_BOARD, audit_board_id_2)
rv = client.get(f'/api/election/{election_id}/jurisdiction/{jurisdiction_id}/round/{round_id}/audit-board/{audit_board_id}/test_auth')
assert rv.status_code == 403
assert json.loads(rv.data) == {'errors': [{'errorType': 'Forbidden', 'message': f'User does not have access to audit board {audit_board_id}'}]}
|
arlo
|
positive
|
def visit(obj: CWLObjectType, stagedir: str, basedir: str, copy: bool=False, staged: bool=False) -> None:
stagedir = cast(Optional[str], obj.get('dirname')) or stagedir
tgt = os.path.join(stagedir, cast(str, obj['basename']))
if obj['location'] in self._pathmap:
return
if obj['class'] == 'Directory':
location = cast(str, obj['location'])
if location.startswith('file://'):
resolved = uri_file_path(location)
else:
resolved = location
self._pathmap[location] = MapperEnt(resolved, tgt, 'WritableDirectory' if copy else 'Directory', staged)
if location.startswith('file://'):
staged = False
<DeepExtract>
for ld in cast(List[CWLObjectType], obj.get('listing', [])):
self.visit(ld, tgt, basedir, copy=cast(bool, ld.get('writable', copy)), staged=staged)
</DeepExtract>
elif obj['class'] == 'File':
path = cast(str, obj['location'])
ab = abspath(path, basedir)
if 'contents' in obj and path.startswith('_:'):
self._pathmap[path] = MapperEnt(obj['contents'], tgt, 'CreateWritableFile' if copy else 'CreateFile', staged)
else:
with SourceLine(obj, 'location', ValidationException, _logger.isEnabledFor(logging.DEBUG)):
deref = ab
if urllib.parse.urlsplit(deref).scheme in ['http', 'https']:
(deref, _last_modified) = downloadHttpFile(path)
else:
st = os.lstat(deref)
while stat.S_ISLNK(st.st_mode):
rl = os.readlink(deref)
deref = rl if os.path.isabs(rl) else os.path.join(os.path.dirname(deref), rl)
st = os.lstat(deref)
self._pathmap[path] = MapperEnt(deref, tgt, 'WritableFile' if copy else 'File', staged)
<DeepExtract>
for ld in cast(List[CWLObjectType], obj.get('secondaryFiles', [])):
self.visit(ld, stagedir, basedir, copy=cast(bool, ld.get('writable', copy)), staged=staged)
</DeepExtract>
|
def visit(obj: CWLObjectType, stagedir: str, basedir: str, copy: bool=False, staged: bool=False) -> None:
stagedir = cast(Optional[str], obj.get('dirname')) or stagedir
tgt = os.path.join(stagedir, cast(str, obj['basename']))
if obj['location'] in self._pathmap:
return
if obj['class'] == 'Directory':
location = cast(str, obj['location'])
if location.startswith('file://'):
resolved = uri_file_path(location)
else:
resolved = location
self._pathmap[location] = MapperEnt(resolved, tgt, 'WritableDirectory' if copy else 'Directory', staged)
if location.startswith('file://'):
staged = False
for ld in cast(List[CWLObjectType], obj.get('listing', [])):
self.visit(ld, tgt, basedir, copy=cast(bool, ld.get('writable', copy)), staged=staged)
elif obj['class'] == 'File':
path = cast(str, obj['location'])
ab = abspath(path, basedir)
if 'contents' in obj and path.startswith('_:'):
self._pathmap[path] = MapperEnt(obj['contents'], tgt, 'CreateWritableFile' if copy else 'CreateFile', staged)
else:
with SourceLine(obj, 'location', ValidationException, _logger.isEnabledFor(logging.DEBUG)):
deref = ab
if urllib.parse.urlsplit(deref).scheme in ['http', 'https']:
(deref, _last_modified) = downloadHttpFile(path)
else:
st = os.lstat(deref)
while stat.S_ISLNK(st.st_mode):
rl = os.readlink(deref)
deref = rl if os.path.isabs(rl) else os.path.join(os.path.dirname(deref), rl)
st = os.lstat(deref)
self._pathmap[path] = MapperEnt(deref, tgt, 'WritableFile' if copy else 'File', staged)
for ld in cast(List[CWLObjectType], obj.get('secondaryFiles', [])):
self.visit(ld, stagedir, basedir, copy=cast(bool, ld.get('writable', copy)), staged=staged)
|
cwltool
|
positive
|
def ssim(images1: np.ndarray, images2: np.ndarray):
<DeepExtract>
assert len(images1.shape) == 4
assert images1.shape == images2.shape
assert images1.dtype == np.float32
assert images2.dtype == np.float32
assert images1.max() <= 1 and images1.min() >= 0
assert images2.max() <= 1 and images2.min() >= 0
</DeepExtract>
mean_ssim = 0
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
jobs = []
for (img1, img2) in zip(images1, images2):
s = pool.apply_async(compare_ssim, (img1, img2), dict(data_range=1, multichannel=True, win_size=11, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, K1=0.01 ** 2, K2=0.03 ** 2))
jobs.append(s)
for job in tqdm.tqdm(jobs):
mean_ssim += job.get()
return mean_ssim / images1.shape[0]
|
def ssim(images1: np.ndarray, images2: np.ndarray):
assert len(images1.shape) == 4
assert images1.shape == images2.shape
assert images1.dtype == np.float32
assert images2.dtype == np.float32
assert images1.max() <= 1 and images1.min() >= 0
assert images2.max() <= 1 and images2.min() >= 0
mean_ssim = 0
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
jobs = []
for (img1, img2) in zip(images1, images2):
s = pool.apply_async(compare_ssim, (img1, img2), dict(data_range=1, multichannel=True, win_size=11, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, K1=0.01 ** 2, K2=0.03 ** 2))
jobs.append(s)
for job in tqdm.tqdm(jobs):
mean_ssim += job.get()
return mean_ssim / images1.shape[0]
|
DeepPrivacy
|
positive
|
def delete(config):
saved_err = None
if self.target is None:
<DeepExtract>
try:
self.target = Target(ISCSIFabricModule(), self.iqn, 'lookup')
if self.tpg_list:
del self.tpg_list[:]
if self.tpg_tag_by_gateway_name:
self.tpg_tag_by_gateway_name = {}
for tpg in self.target.tpgs:
self.tpg_list.append(tpg)
network_portals = list(tpg.network_portals)
if network_portals:
ip_address = network_portals[0].ip_address
gateway_name = self._get_gateway_name(ip_address)
if gateway_name:
self.tpg_tag_by_gateway_name[gateway_name] = tpg.tag
else:
self.logger.info("No available network portal for target with iqn of '{}'".format(self.iqn))
except RTSLibError as err:
self.error_msg = err
self.error = True
self.logger.info('(Gateway.load_config) successfully loaded existing target definition')
</DeepExtract>
if self.target:
try:
self.target.delete()
except RTSLibError as err:
self.logger.error('lio target deletion failed {}'.format(err))
saved_err = err
for disk in config.config['targets'][self.iqn]['disks'].keys():
so = lookup_storage_object_by_disk(config, disk)
if so is None:
self.logger.debug('lio disk lookup failed {}')
continue
if so.status == 'activated':
continue
try:
so.delete()
except RTSLibError as err:
self.logger.error('lio disk deletion failed {}'.format(err))
if saved_err is None:
saved_err = err
if saved_err:
raise RTSLibError(saved_err)
|
def delete(config):
saved_err = None
if self.target is None:
try:
self.target = Target(ISCSIFabricModule(), self.iqn, 'lookup')
if self.tpg_list:
del self.tpg_list[:]
if self.tpg_tag_by_gateway_name:
self.tpg_tag_by_gateway_name = {}
for tpg in self.target.tpgs:
self.tpg_list.append(tpg)
network_portals = list(tpg.network_portals)
if network_portals:
ip_address = network_portals[0].ip_address
gateway_name = self._get_gateway_name(ip_address)
if gateway_name:
self.tpg_tag_by_gateway_name[gateway_name] = tpg.tag
else:
self.logger.info("No available network portal for target with iqn of '{}'".format(self.iqn))
except RTSLibError as err:
self.error_msg = err
self.error = True
self.logger.info('(Gateway.load_config) successfully loaded existing target definition')
if self.target:
try:
self.target.delete()
except RTSLibError as err:
self.logger.error('lio target deletion failed {}'.format(err))
saved_err = err
for disk in config.config['targets'][self.iqn]['disks'].keys():
so = lookup_storage_object_by_disk(config, disk)
if so is None:
self.logger.debug('lio disk lookup failed {}')
continue
if so.status == 'activated':
continue
try:
so.delete()
except RTSLibError as err:
self.logger.error('lio disk deletion failed {}'.format(err))
if saved_err is None:
saved_err = err
if saved_err:
raise RTSLibError(saved_err)
|
ceph-iscsi
|
positive
|
def non_local_block(x, name, use_sn):
"""Self-attention (non-local) block.
This method is used to exactly reproduce SAGAN and ignores Gin settings on
weight initialization and spectral normalization.
Args:
x: Input tensor of shape [batch, h, w, c].
name: Name of the variable scope.
use_sn: Apply spectral norm to the weights.
Returns:
A tensor of the same shape after self-attention was applied.
"""
def _spatial_flatten(inputs):
shape = inputs.shape
return tf.reshape(inputs, (-1, shape[1] * shape[2], shape[3]))
with tf.variable_scope(name):
(h, w, num_channels) = x.get_shape().as_list()[1:]
num_channels_attn = num_channels // 8
num_channels_g = num_channels // 2
theta = conv1x1(x, num_channels_attn, name='conv2d_theta', use_sn=use_sn, use_bias=False)
<DeepExtract>
shape = theta.shape
theta = tf.reshape(theta, (-1, shape[1] * shape[2], shape[3]))
</DeepExtract>
phi = conv1x1(x, num_channels_attn, name='conv2d_phi', use_sn=use_sn, use_bias=False)
phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
<DeepExtract>
shape = phi.shape
phi = tf.reshape(phi, (-1, shape[1] * shape[2], shape[3]))
</DeepExtract>
attn = tf.matmul(theta, phi, transpose_b=True)
attn = tf.nn.softmax(attn)
g = conv1x1(x, num_channels_g, name='conv2d_g', use_sn=use_sn, use_bias=False)
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2)
<DeepExtract>
shape = g.shape
g = tf.reshape(g, (-1, shape[1] * shape[2], shape[3]))
</DeepExtract>
attn_g = tf.matmul(attn, g)
attn_g = tf.reshape(attn_g, [-1, h, w, num_channels_g])
sigma = tf.get_variable('sigma', [], initializer=tf.zeros_initializer())
attn_g = conv1x1(attn_g, num_channels, name='conv2d_attn_g', use_sn=use_sn, use_bias=False)
return x + sigma * attn_g
|
def non_local_block(x, name, use_sn):
"""Self-attention (non-local) block.
This method is used to exactly reproduce SAGAN and ignores Gin settings on
weight initialization and spectral normalization.
Args:
x: Input tensor of shape [batch, h, w, c].
name: Name of the variable scope.
use_sn: Apply spectral norm to the weights.
Returns:
A tensor of the same shape after self-attention was applied.
"""
def _spatial_flatten(inputs):
shape = inputs.shape
return tf.reshape(inputs, (-1, shape[1] * shape[2], shape[3]))
with tf.variable_scope(name):
(h, w, num_channels) = x.get_shape().as_list()[1:]
num_channels_attn = num_channels // 8
num_channels_g = num_channels // 2
theta = conv1x1(x, num_channels_attn, name='conv2d_theta', use_sn=use_sn, use_bias=False)
shape = theta.shape
theta = tf.reshape(theta, (-1, shape[1] * shape[2], shape[3]))
phi = conv1x1(x, num_channels_attn, name='conv2d_phi', use_sn=use_sn, use_bias=False)
phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
shape = phi.shape
phi = tf.reshape(phi, (-1, shape[1] * shape[2], shape[3]))
attn = tf.matmul(theta, phi, transpose_b=True)
attn = tf.nn.softmax(attn)
g = conv1x1(x, num_channels_g, name='conv2d_g', use_sn=use_sn, use_bias=False)
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2)
shape = g.shape
g = tf.reshape(g, (-1, shape[1] * shape[2], shape[3]))
attn_g = tf.matmul(attn, g)
attn_g = tf.reshape(attn_g, [-1, h, w, num_channels_g])
sigma = tf.get_variable('sigma', [], initializer=tf.zeros_initializer())
attn_g = conv1x1(attn_g, num_channels, name='conv2d_attn_g', use_sn=use_sn, use_bias=False)
return x + sigma * attn_g
|
compare_gan
|
positive
|
def get_vm_info_pretty(self):
"""Header + VM info formatted output."""
<DeepExtract>
output = '%-6s %-25s %-20s %-10s %-12s %-23s\n' % ('ID', 'HOSTNAME', 'VMTYPE', 'USER', 'STATUS', 'CLUSTER')
</DeepExtract>
output += self.get_vm_info()
return output
|
def get_vm_info_pretty(self):
"""Header + VM info formatted output."""
output = '%-6s %-25s %-20s %-10s %-12s %-23s\n' % ('ID', 'HOSTNAME', 'VMTYPE', 'USER', 'STATUS', 'CLUSTER')
output += self.get_vm_info()
return output
|
cloud-scheduler
|
positive
|
def setup(self):
if self.config.container_mode:
<DeepExtract>
ppa_commands = self.get_ppa_adding_commands()
if ppa_commands:
self.run_command(' && '.join(ppa_commands))
dependencies = self.get_dependency_packages()
if dependencies:
self.run_command('apt-get update', use_build_dir=False)
run = False
for dep in dependencies:
exists = ''
try:
exists = self.run_command('dpkg -s {} | grep Status'.format(dep), get_output=True, use_build_dir=False)
except subprocess.CalledProcessError:
exists = ''
if exists.strip() != 'Status: install ok installed':
run = True
break
if run:
self.run_command(self.get_apt_install_cmd(dependencies), use_build_dir=False)
else:
logger.debug('Dependencies already installed')
if self.config.image_setup:
for command in self.config.image_setup.get('run', []):
self.run_command(command, use_build_dir=False)
</DeepExtract>
if self.config.needs_clickable_image():
<DeepExtract>
if not self.minimum_version:
return
if not image_exists(self.docker_image):
return
version = 0
try:
format_string = '{{ index .Config.Labels "image_version"}}'
command = "docker inspect --format '{}' {}".format(format_string, self.docker_image)
logger.debug('Checking docker image version via: {}'.format(command))
version_string = run_subprocess_check_output(command)
version = int(version_string)
except (ValueError, subprocess.CalledProcessError):
logger.warn('Could not read the image version from the container')
if version < self.minimum_version:
raise ClickableException('This version of Clickable requires Clickable docker image {} in version {} or higher (found version {}). Please run "clickable update" to update your local images.'.format(self.docker_image, self.minimum_version, version))
</DeepExtract>
if self.needs_customized_container():
<DeepExtract>
logger.debug('Checking dependencies and container setup')
self.check_docker()
commands = []
env_vars = self.config.image_setup.get('env', {})
commands += self.get_ppa_adding_commands()
dependencies = self.get_dependency_packages()
if dependencies:
commands.append('echo set debconf/frontend Noninteractive | debconf-communicate && echo set debconf/priority critical | debconf-communicate')
commands.append('apt-get update && {} && apt-get clean'.format(self.get_apt_install_cmd(dependencies)))
if self.config.image_setup:
commands.extend(self.config.image_setup.get('run', []))
dockerfile_content = self.construct_dockerfile_content(commands, env_vars)
if self.is_dockerfile_outdated(dockerfile_content):
self.create_custom_container(dockerfile_content)
else:
logger.debug('Image already setup')
</DeepExtract>
|
def setup(self):
if self.config.container_mode:
ppa_commands = self.get_ppa_adding_commands()
if ppa_commands:
self.run_command(' && '.join(ppa_commands))
dependencies = self.get_dependency_packages()
if dependencies:
self.run_command('apt-get update', use_build_dir=False)
run = False
for dep in dependencies:
exists = ''
try:
exists = self.run_command('dpkg -s {} | grep Status'.format(dep), get_output=True, use_build_dir=False)
except subprocess.CalledProcessError:
exists = ''
if exists.strip() != 'Status: install ok installed':
run = True
break
if run:
self.run_command(self.get_apt_install_cmd(dependencies), use_build_dir=False)
else:
logger.debug('Dependencies already installed')
if self.config.image_setup:
for command in self.config.image_setup.get('run', []):
self.run_command(command, use_build_dir=False)
if self.config.needs_clickable_image():
if not self.minimum_version:
return
if not image_exists(self.docker_image):
return
version = 0
try:
format_string = '{{ index .Config.Labels "image_version"}}'
command = "docker inspect --format '{}' {}".format(format_string, self.docker_image)
logger.debug('Checking docker image version via: {}'.format(command))
version_string = run_subprocess_check_output(command)
version = int(version_string)
except (ValueError, subprocess.CalledProcessError):
logger.warn('Could not read the image version from the container')
if version < self.minimum_version:
raise ClickableException('This version of Clickable requires Clickable docker image {} in version {} or higher (found version {}). Please run "clickable update" to update your local images.'.format(self.docker_image, self.minimum_version, version))
if self.needs_customized_container():
logger.debug('Checking dependencies and container setup')
self.check_docker()
commands = []
env_vars = self.config.image_setup.get('env', {})
commands += self.get_ppa_adding_commands()
dependencies = self.get_dependency_packages()
if dependencies:
commands.append('echo set debconf/frontend Noninteractive | debconf-communicate && echo set debconf/priority critical | debconf-communicate')
commands.append('apt-get update && {} && apt-get clean'.format(self.get_apt_install_cmd(dependencies)))
if self.config.image_setup:
commands.extend(self.config.image_setup.get('run', []))
dockerfile_content = self.construct_dockerfile_content(commands, env_vars)
if self.is_dockerfile_outdated(dockerfile_content):
self.create_custom_container(dockerfile_content)
else:
logger.debug('Image already setup')
|
clickable
|
positive
|
def GetFont(self):
"""
Return Font that should be used to draw text in this block
"""
font = None
if self.engine.reportFormat.UseListCtrlTextFormat and self.GetListCtrl():
<DeepExtract>
listCtrl = None
</DeepExtract>
if listCtrl.IsVirtual():
attr = listCtrl.OnGetItemAttr(self.rowIndex)
if attr and attr.HasFont():
font = attr.GetFont()
else:
font = listCtrl.GetItemFont(self.rowIndex)
if font and font.IsOk():
return font
else:
return self.GetFormat().GetFont()
|
def GetFont(self):
"""
Return Font that should be used to draw text in this block
"""
font = None
if self.engine.reportFormat.UseListCtrlTextFormat and self.GetListCtrl():
listCtrl = None
if listCtrl.IsVirtual():
attr = listCtrl.OnGetItemAttr(self.rowIndex)
if attr and attr.HasFont():
font = attr.GetFont()
else:
font = listCtrl.GetItemFont(self.rowIndex)
if font and font.IsOk():
return font
else:
return self.GetFormat().GetFont()
|
bookhub
|
positive
|
def test_reporting_reset(self):
<DeepExtract>
argv = ('configure', 'slack', 'https://slack.url', '#test-channel', '--name', 'test-name')
args = self.parser.parse(argv)
data = {'Slack': {'URL': args.URL, 'channel': args.channel, 'name': args.name}}
self.resources.reporting(data)
result = self.resources.preferences.read()
self.assertItemsEqual(result['Reporting'], data)
</DeepExtract>
default = resources.DEFAULT.reporting
self.resources.reporting(default)
result = self.resources.preferences.read()
self.assertItemsEqual(result['Reporting'], default)
|
def test_reporting_reset(self):
argv = ('configure', 'slack', 'https://slack.url', '#test-channel', '--name', 'test-name')
args = self.parser.parse(argv)
data = {'Slack': {'URL': args.URL, 'channel': args.channel, 'name': args.name}}
self.resources.reporting(data)
result = self.resources.preferences.read()
self.assertItemsEqual(result['Reporting'], data)
default = resources.DEFAULT.reporting
self.resources.reporting(default)
result = self.resources.preferences.read()
self.assertItemsEqual(result['Reporting'], default)
|
aeios
|
positive
|
def get_binary_stdout() -> t.BinaryIO:
<DeepExtract>
if _is_binary_writer(sys.stdout, False):
writer = t.cast(t.BinaryIO, sys.stdout)
buf = getattr(sys.stdout, 'buffer', None)
if buf is not None and _is_binary_writer(buf, True):
writer = t.cast(t.BinaryIO, buf)
writer = None
</DeepExtract>
if writer is None:
raise RuntimeError('Was not able to determine binary stream for sys.stdout.')
return writer
|
def get_binary_stdout() -> t.BinaryIO:
if _is_binary_writer(sys.stdout, False):
writer = t.cast(t.BinaryIO, sys.stdout)
buf = getattr(sys.stdout, 'buffer', None)
if buf is not None and _is_binary_writer(buf, True):
writer = t.cast(t.BinaryIO, buf)
writer = None
if writer is None:
raise RuntimeError('Was not able to determine binary stream for sys.stdout.')
return writer
|
click
|
positive
|
def run(self):
self.prepareQueries()
<DeepExtract>
for (i, query) in self.query_files.items():
result_file = os.path.join(self.workDir, 'alignment{0}.out'.format(i))
cmd = self.aligner.get_command(query, self.database, result_file)
self.commands.append(cmd)
</DeepExtract>
self.executeCommands(shell=True)
<DeepExtract>
result_files = os.path.join(self.workDir, 'alignment*.out')
entry_file = os.path.join(self.workDir, 'sid_list.txt')
cmd = ['cut -f2 {0} | cut -f1 -d" " > {1}'.format(result_files, entry_file)]
self.commands = [cmd]
self.executeCommands(shell=True, process_name='collecting seqids')
reference_fasta = os.path.join(self.workDir, 'reference.fasta')
error_log_file = os.path.join(self.workDir, 'blastdbcmd.err')
cmd = self.blastdbcmd.get_command(entry_file, self.database, reference_fasta, error_log_file)
self.commands = [cmd]
self.executeCommands(shell=True, process_name='BLASTDBCMD')
if os.path.exists(reference_fasta) and os.path.getsize(reference_fasta) > 0:
self.references = MBGD_fasta_reader(reference_fasta, self.mbgd_definition_file)
self.logger.debug('{} sequences were added to the reference.'.format(len(self.references)))
else:
self.logger.warning('Fasta file for hit proteins does not exist or is empty. blastdbcmd might have failed.')
</DeepExtract>
<DeepExtract>
for i in range(len(self.query_files)):
result_file = os.path.join(self.workDir, 'alignment{0}.out'.format(i))
self.parse_result(result_file)
</DeepExtract>
|
def run(self):
self.prepareQueries()
for (i, query) in self.query_files.items():
result_file = os.path.join(self.workDir, 'alignment{0}.out'.format(i))
cmd = self.aligner.get_command(query, self.database, result_file)
self.commands.append(cmd)
self.executeCommands(shell=True)
result_files = os.path.join(self.workDir, 'alignment*.out')
entry_file = os.path.join(self.workDir, 'sid_list.txt')
cmd = ['cut -f2 {0} | cut -f1 -d" " > {1}'.format(result_files, entry_file)]
self.commands = [cmd]
self.executeCommands(shell=True, process_name='collecting seqids')
reference_fasta = os.path.join(self.workDir, 'reference.fasta')
error_log_file = os.path.join(self.workDir, 'blastdbcmd.err')
cmd = self.blastdbcmd.get_command(entry_file, self.database, reference_fasta, error_log_file)
self.commands = [cmd]
self.executeCommands(shell=True, process_name='BLASTDBCMD')
if os.path.exists(reference_fasta) and os.path.getsize(reference_fasta) > 0:
self.references = MBGD_fasta_reader(reference_fasta, self.mbgd_definition_file)
self.logger.debug('{} sequences were added to the reference.'.format(len(self.references)))
else:
self.logger.warning('Fasta file for hit proteins does not exist or is empty. blastdbcmd might have failed.')
for i in range(len(self.query_files)):
result_file = os.path.join(self.workDir, 'alignment{0}.out'.format(i))
self.parse_result(result_file)
|
dfast_core
|
positive
|
def Parse(expression):
<DeepExtract>
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or (char.lower() >= 'a' and char.lower() <= 'z'):
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
tokens = result
</DeepExtract>
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
<DeepExtract>
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] == STATE_STRING or (tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x')):
(functioncall, tokens) = [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
(functioncall, tokens) = (None, tokens)
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
(functioncall, tokens) = [[function, arguments], tokens]
</DeepExtract>
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
|
def Parse(expression):
result = []
token = ''
state = STATE_START
while expression != '':
char = expression[0]
expression = expression[1:]
if char == "'":
if state == STATE_START:
state = STATE_STRING
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
state = STATE_STRING
token = ''
elif state == STATE_STRING:
result.append([STATE_STRING, token])
state = STATE_START
token = ''
elif char >= '0' and char <= '9' or (char.lower() >= 'a' and char.lower() <= 'z'):
if state == STATE_START:
token = char
state = STATE_IDENTIFIER
else:
token += char
elif char == ' ':
if state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
elif state == STATE_STRING:
token += char
elif state == STATE_IDENTIFIER:
result.append([STATE_IDENTIFIER, token])
token = ''
state = STATE_START
result.append([STATE_SPECIAL_CHAR, char])
elif state == STATE_STRING:
token += char
else:
result.append([STATE_SPECIAL_CHAR, char])
token = ''
if state == STATE_IDENTIFIER:
result.append([state, token])
elif state == STATE_STRING:
result = [[STATE_ERROR, 'Error: string not closed', token]]
tokens = result
if len(tokens) == 0:
print('Parsing error')
return None
if tokens[0][0] == STATE_ERROR:
print(tokens[0][1])
print(tokens[0][2])
print(expression)
return None
functioncalls = []
while True:
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] == STATE_STRING or (tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x')):
(functioncall, tokens) = [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
if tokens[0][0] != STATE_IDENTIFIER:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
function = tokens[0][1]
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
print('Parsing error')
(functioncall, tokens) = (None, tokens)
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
arguments = []
while True:
if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
arguments.append(tokens[0])
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
print('Parsing error')
(functioncall, tokens) = (None, tokens)
if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
tokens = tokens[1:]
break
tokens = tokens[1:]
if len(tokens) == 0:
print('Parsing error')
(functioncall, tokens) = (None, tokens)
(functioncall, tokens) = [[function, arguments], tokens]
if functioncall == None:
return None
functioncalls.append(functioncall)
if len(tokens) == 0:
return functioncalls
if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
print('Parsing error')
return None
tokens = tokens[1:]
|
Beta
|
positive
|
def add_first(self, stage_info, dim_in=3, pad=True):
assert len(stage_info) >= 2
channel = stage_info[0]
stride = stage_info[1]
<DeepExtract>
ret = _get_divisible_by(int(int(channel * self.width_ratio)), self.width_divisor, self.width_divisor)
out_depth = ret
</DeepExtract>
kernel = 3
if len(stage_info) > 2:
kernel = stage_info[2]
out = ConvBNRelu(dim_in, out_depth, kernel=kernel, stride=stride, pad=kernel // 2 if pad else 0, no_bias=1, use_relu='relu', bn_type=self.bn_type)
self.last_depth = out_depth
return out
|
def add_first(self, stage_info, dim_in=3, pad=True):
assert len(stage_info) >= 2
channel = stage_info[0]
stride = stage_info[1]
ret = _get_divisible_by(int(int(channel * self.width_ratio)), self.width_divisor, self.width_divisor)
out_depth = ret
kernel = 3
if len(stage_info) > 2:
kernel = stage_info[2]
out = ConvBNRelu(dim_in, out_depth, kernel=kernel, stride=stride, pad=kernel // 2 if pad else 0, no_bias=1, use_relu='relu', bn_type=self.bn_type)
self.last_depth = out_depth
return out
|
bezier_curve_text_spotting
|
positive
|
def get(self, key):
<DeepExtract>
if self.root is None:
self.root = None
if key < self.root.key:
self.root = self._get(self.root.left, key)
elif key > self.root.key:
self.root = self._get(self.root.right, key)
else:
self.root = self.root
</DeepExtract>
if x is None:
return None
else:
return x.val
|
def get(self, key):
if self.root is None:
self.root = None
if key < self.root.key:
self.root = self._get(self.root.left, key)
elif key > self.root.key:
self.root = self._get(self.root.right, key)
else:
self.root = self.root
if x is None:
return None
else:
return x.val
|
CtCI-6th-Edition
|
positive
|
def test_bitarray_random(self):
for a in self.randombitarrays():
sa = a.to01()
for b in self.randombitarrays():
bb = b.copy()
c = bitarray(a)
c.extend(b)
self.assertEqual(c.to01(), sa + bb.to01())
self.assertEqual(c.endian(), a.endian())
self.assertEqual(len(c), len(a) + len(b))
<DeepExtract>
self.assertIsInstance(c, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = c.buffer_info()
self.assertEqual(size, bits2bytes(len(c)))
self.assertEqual(padbits, 8 * size - len(c))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, c.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(c.nbytes, size)
self.assertEqual(c.padbits, padbits)
self.assertEqual(c.readonly, readonly)
self.assertEqual(len(c) + c.padbits, 8 * c.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(c) % 8, 0)
self.assertEqual(len(c), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(c).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(c.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
<DeepExtract>
self.assertEqual(b, bb)
self.assertEqual(b.endian(), bb.endian())
</DeepExtract>
<DeepExtract>
self.assertIsInstance(b, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = b.buffer_info()
self.assertEqual(size, bits2bytes(len(b)))
self.assertEqual(padbits, 8 * size - len(b))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, b.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(b.nbytes, size)
self.assertEqual(b.padbits, padbits)
self.assertEqual(b.readonly, readonly)
self.assertEqual(len(b) + b.padbits, 8 * b.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(b) % 8, 0)
self.assertEqual(len(b), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(b).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(b.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
</DeepExtract>
|
def test_bitarray_random(self):
for a in self.randombitarrays():
sa = a.to01()
for b in self.randombitarrays():
bb = b.copy()
c = bitarray(a)
c.extend(b)
self.assertEqual(c.to01(), sa + bb.to01())
self.assertEqual(c.endian(), a.endian())
self.assertEqual(len(c), len(a) + len(b))
self.assertIsInstance(c, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = c.buffer_info()
self.assertEqual(size, bits2bytes(len(c)))
self.assertEqual(padbits, 8 * size - len(c))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, c.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(c.nbytes, size)
self.assertEqual(c.padbits, padbits)
self.assertEqual(c.readonly, readonly)
self.assertEqual(len(c) + c.padbits, 8 * c.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(c) % 8, 0)
self.assertEqual(len(c), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(c).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(c.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
self.assertEqual(b, bb)
self.assertEqual(b.endian(), bb.endian())
self.assertIsInstance(b, bitarray)
(ptr, size, endian, padbits, alloc, readonly, buf, exports) = b.buffer_info()
self.assertEqual(size, bits2bytes(len(b)))
self.assertEqual(padbits, 8 * size - len(b))
self.assertTrue(0 <= padbits < 8)
self.assertEqual(endian, b.endian())
self.assertTrue(endian in ('little', 'big'))
self.assertEqual(b.nbytes, size)
self.assertEqual(b.padbits, padbits)
self.assertEqual(b.readonly, readonly)
self.assertEqual(len(b) + b.padbits, 8 * b.nbytes)
if buf:
self.assertEqual(alloc, 0)
self.assertEqual(len(b) % 8, 0)
self.assertEqual(len(b), 8 * size)
self.assertEqual(padbits, 0)
else:
self.assertTrue(alloc >= size)
if ptr == 0:
self.assertEqual(size, 0)
self.assertEqual(alloc, 0)
if type(b).__name__ == 'frozenbitarray':
self.assertEqual(readonly, 1)
if padbits:
b = bitarray(endian=endian)
b.frombytes(b.tobytes()[-1:])
self.assertFalse(b[-padbits:].any())
elif not buf:
self.assertEqual(readonly, 0)
|
bitarray
|
positive
|
def rebalance(self, obs):
if not self.init:
n_pairs = obs.columns.levels[0].shape[0]
action = np.ones(n_pairs)
action[-1] = 0
self.crp = array_normalize(action)
self.gti = np.ones_like(self.crp)
self.init = True
if self.step:
prev_posit = self.get_portfolio_vector(obs, index=-1)
<DeepExtract>
raise NotImplementedError()
</DeepExtract>
return self.update(prev_posit, price_relative)
else:
return self.crp
|
def rebalance(self, obs):
if not self.init:
n_pairs = obs.columns.levels[0].shape[0]
action = np.ones(n_pairs)
action[-1] = 0
self.crp = array_normalize(action)
self.gti = np.ones_like(self.crp)
self.init = True
if self.step:
prev_posit = self.get_portfolio_vector(obs, index=-1)
raise NotImplementedError()
return self.update(prev_posit, price_relative)
else:
return self.crp
|
cryptotrader
|
positive
|
def get_circle_cos(origin, res, size, close_end=False):
if close_end:
res += 1
orgs = np.tile(np.array(origin, dtype=np.float32), res).reshape(-1, 2)
pos = orgs + np.array([0.0, size], dtype=np.float32)
if close_end:
angs = np.arange(res, dtype=np.float32) / (res - 1) * np.radians(360)
else:
angs = np.arange(res, dtype=np.float32) / res * np.radians(360)
<DeepExtract>
x = (np.cos(angs) * (pos[:, 0] - orgs[:, 0]) - np.sin(angs) * (pos[:, 1] - orgs[:, 1])).reshape(-1, 1)
y = (np.sin(angs) * (pos[:, 0] - orgs[:, 0]) + np.cos(angs) * (pos[:, 1] - orgs[:, 1])).reshape(-1, 1)
vecs = np.hstack((x, y))
vecs += np.array(orgs)
circ_pos = vecs
</DeepExtract>
return circ_pos
|
def get_circle_cos(origin, res, size, close_end=False):
if close_end:
res += 1
orgs = np.tile(np.array(origin, dtype=np.float32), res).reshape(-1, 2)
pos = orgs + np.array([0.0, size], dtype=np.float32)
if close_end:
angs = np.arange(res, dtype=np.float32) / (res - 1) * np.radians(360)
else:
angs = np.arange(res, dtype=np.float32) / res * np.radians(360)
x = (np.cos(angs) * (pos[:, 0] - orgs[:, 0]) - np.sin(angs) * (pos[:, 1] - orgs[:, 1])).reshape(-1, 1)
y = (np.sin(angs) * (pos[:, 0] - orgs[:, 0]) + np.cos(angs) * (pos[:, 1] - orgs[:, 1])).reshape(-1, 1)
vecs = np.hstack((x, y))
vecs += np.array(orgs)
circ_pos = vecs
return circ_pos
|
Abnormal
|
positive
|
def remove_item(self, table_name, key_name, key_value, sort_name=None, sort_value=None):
""" Remove item from specified table by pk.
:type table_name: str
:type key_name: str
:param key_value: value of attribute
:type sort_name: str
:param sort_value: value of attribute
"""
<DeepExtract>
table = self.conn.Table(table_name)
</DeepExtract>
key = {key_name: key_value}
if sort_value and sort_name:
key[sort_name] = sort_value
table.delete_item(Key=key)
|
def remove_item(self, table_name, key_name, key_value, sort_name=None, sort_value=None):
""" Remove item from specified table by pk.
:type table_name: str
:type key_name: str
:param key_value: value of attribute
:type sort_name: str
:param sort_value: value of attribute
"""
table = self.conn.Table(table_name)
key = {key_name: key_value}
if sort_value and sort_name:
key[sort_name] = sort_value
table.delete_item(Key=key)
|
aws-syndicate
|
positive
|
@property
def graph(self):
if self.scene_name not in self._CACHED_GRAPHS:
g = nx.DiGraph()
<DeepExtract>
self.controller.step({'action': 'GetReachablePositions'})
assert self.last_action_success
points_slim = self.last_event.metadata['actionReturn']
points = []
for r in [0, 90, 180, 270]:
for horizon in [-30, 0, 30, 60]:
for p in points_slim:
p = copy.copy(p)
p['rotation'] = r
p['horizon'] = horizon
points.append(p)
points = points
</DeepExtract>
for p in points:
<DeepExtract>
if self.get_key(p) in g:
return
existing_nodes = set(g.nodes())
g.add_node(self.get_key(p))
for o in self.possible_neighbor_offsets():
t = (self.get_key(p)[0] + o[0], self.get_key(p)[1] + o[1], self.get_key(p)[2] + o[2], self.get_key(p)[3] + o[3])
if t in existing_nodes:
self._add_from_to_edge(g, self.get_key(p), t)
self._add_from_to_edge(g, t, self.get_key(p))
</DeepExtract>
self._CACHED_GRAPHS[self.scene_name] = g
return self._CACHED_GRAPHS[self.scene_name]
|
@property
def graph(self):
if self.scene_name not in self._CACHED_GRAPHS:
g = nx.DiGraph()
self.controller.step({'action': 'GetReachablePositions'})
assert self.last_action_success
points_slim = self.last_event.metadata['actionReturn']
points = []
for r in [0, 90, 180, 270]:
for horizon in [-30, 0, 30, 60]:
for p in points_slim:
p = copy.copy(p)
p['rotation'] = r
p['horizon'] = horizon
points.append(p)
points = points
for p in points:
if self.get_key(p) in g:
return
existing_nodes = set(g.nodes())
g.add_node(self.get_key(p))
for o in self.possible_neighbor_offsets():
t = (self.get_key(p)[0] + o[0], self.get_key(p)[1] + o[1], self.get_key(p)[2] + o[2], self.get_key(p)[3] + o[3])
if t in existing_nodes:
self._add_from_to_edge(g, self.get_key(p), t)
self._add_from_to_edge(g, t, self.get_key(p))
self._CACHED_GRAPHS[self.scene_name] = g
return self._CACHED_GRAPHS[self.scene_name]
|
allenact
|
positive
|
def test_save_question_with_update_multiple_choice_question(self):
MultipleChoiceQuestion.objects.create(question_id=2, assignment=Assignment.objects.get(assignment_id=1), title='Hideauze', description='Who where the Hideauze?', a='Former Humans', a_is_correct=True, b='Aliens', b_is_correct=False, c='Magical or Supernatural Creatures', c_is_correct=False, d='Transhumanists', d_is_correct=True, e='Heavenly Creatures', e_is_correct=True)
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
<DeepExtract>
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
</DeepExtract>
response = client.post('/teacher/course/1/assignment/1/save_question', {'question_id': 2, 'question_type': settings.MULTIPLECHOICE_QUESTION_TYPE, 'question_num': 1, 'title': 'Sun', 'description': 'Why did humanity leave Earth?', 'a': 'Global Cooling', 'b': 'Abnormal Solar Hibernation', 'c': 'Global Warming', 'd': 'World Peace', 'a_is_correct': True, 'b_is_correct': True, 'marks': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
|
def test_save_question_with_update_multiple_choice_question(self):
MultipleChoiceQuestion.objects.create(question_id=2, assignment=Assignment.objects.get(assignment_id=1), title='Hideauze', description='Who where the Hideauze?', a='Former Humans', a_is_correct=True, b='Aliens', b_is_correct=False, c='Magical or Supernatural Creatures', c_is_correct=False, d='Transhumanists', d_is_correct=True, e='Heavenly Creatures', e_is_correct=True)
kwargs = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
client = Client()
client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
client = client
response = client.post('/teacher/course/1/assignment/1/save_question', {'question_id': 2, 'question_type': settings.MULTIPLECHOICE_QUESTION_TYPE, 'question_num': 1, 'title': 'Sun', 'description': 'Why did humanity leave Earth?', 'a': 'Global Cooling', 'b': 'Abnormal Solar Hibernation', 'c': 'Global Warming', 'd': 'World Peace', 'a_is_correct': True, 'b_is_correct': True, 'marks': 1}, **kwargs)
self.assertEqual(response.status_code, 200)
json_string = response.content.decode(encoding='UTF-8')
array = json.loads(json_string)
self.assertEqual(array['message'], 'question was saved')
self.assertEqual(array['status'], 'success')
|
academicstoday-django
|
positive
|
def read_odoo_setup(self):
"""Ugly method to extract requirements & version from ugly setup.py.
Primarily designed for 6.0, but works with 6.1 as well.
"""
old_setup = setuptools.setup
old_distutils_setup = distutils.core.setup
def new_setup(*args, **kw):
self.requirements.extend(kw.get('install_requires', ()))
self.version_detected = kw['version']
setuptools.setup = new_setup
distutils.core.setup = new_setup
sys.path.insert(0, '.')
with open(join(self.odoo_dir, 'setup.py'), 'rb') as f:
saved_argv = sys.argv
sys.argv = ['setup.py', 'develop']
try:
imp.load_module('setup', f, 'setup.py', ('.py', 'r', imp.PY_SOURCE))
except SystemExit as exception:
if 'dsextras' in unicode(exception):
raise EnvironmentError('Please first install PyGObject and PyGTK !')
else:
try:
<DeepExtract>
with open(join(self.odoo_dir, 'bin', 'release.py'), 'rb') as f:
mod = imp.load_module('release', f, 'release.py', ('.py', 'r', imp.PY_SOURCE))
self.version_detected = mod.version
</DeepExtract>
except Exception as exc:
raise EnvironmentError('Problem while reading Odoo release.py: %s' % exc)
except ImportError as exception:
if 'babel' in unicode(exception):
raise EnvironmentError('OpenERP setup.py has an unwanted import Babel.\n=> First install Babel on your system or virtualenv :(\n(sudo aptitude install python-babel, or pip install babel)')
else:
raise exception
except Exception as exception:
raise EnvironmentError('Problem while reading Odoo setup.py: %s' % exception)
finally:
sys.argv = saved_argv
sys.path.pop(0)
setuptools.setup = old_setup
distutils.core.setup = old_distutils_setup
<DeepExtract>
pass
</DeepExtract>
|
def read_odoo_setup(self):
"""Ugly method to extract requirements & version from ugly setup.py.
Primarily designed for 6.0, but works with 6.1 as well.
"""
old_setup = setuptools.setup
old_distutils_setup = distutils.core.setup
def new_setup(*args, **kw):
self.requirements.extend(kw.get('install_requires', ()))
self.version_detected = kw['version']
setuptools.setup = new_setup
distutils.core.setup = new_setup
sys.path.insert(0, '.')
with open(join(self.odoo_dir, 'setup.py'), 'rb') as f:
saved_argv = sys.argv
sys.argv = ['setup.py', 'develop']
try:
imp.load_module('setup', f, 'setup.py', ('.py', 'r', imp.PY_SOURCE))
except SystemExit as exception:
if 'dsextras' in unicode(exception):
raise EnvironmentError('Please first install PyGObject and PyGTK !')
else:
try:
with open(join(self.odoo_dir, 'bin', 'release.py'), 'rb') as f:
mod = imp.load_module('release', f, 'release.py', ('.py', 'r', imp.PY_SOURCE))
self.version_detected = mod.version
except Exception as exc:
raise EnvironmentError('Problem while reading Odoo release.py: %s' % exc)
except ImportError as exception:
if 'babel' in unicode(exception):
raise EnvironmentError('OpenERP setup.py has an unwanted import Babel.\n=> First install Babel on your system or virtualenv :(\n(sudo aptitude install python-babel, or pip install babel)')
else:
raise exception
except Exception as exception:
raise EnvironmentError('Problem while reading Odoo setup.py: %s' % exception)
finally:
sys.argv = saved_argv
sys.path.pop(0)
setuptools.setup = old_setup
distutils.core.setup = old_distutils_setup
pass
</DeepExtract>
|
anybox.recipe.odoo
|
positive
|