| before (string, lengths 0–955k) | after (string, lengths 0–877k) | repo (string, lengths 1–74) | type (string, 1 class) |
|---|---|---|---|
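Each row pairs a `before` snippet, in which inlined helper code is wrapped in `<DeepExtract>` / `</DeepExtract>` marker lines, with an `after` snippet where those marker lines are dropped and the enclosed code is kept, along with the source `repo` and a `type` label (every row shown here is `positive`). The following is a minimal sketch of that marker-stripping step, assuming the markers always sit on lines of their own as they do in the rows below; it is illustrative only, not the pipeline that produced the dataset.

```python
def strip_deepextract_markers(before: str) -> str:
    """Drop <DeepExtract>/</DeepExtract> marker lines, keeping the code between them."""
    kept = []
    for line in before.splitlines():
        # Marker lines carry no code of their own; everything else is preserved verbatim.
        if line.strip() in ("<DeepExtract>", "</DeepExtract>"):
            continue
        kept.append(line)
    return "\n".join(kept)
```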
def setUp(self):
super(TestAddCommits, self).setUp()
(self.db_fd, filepath) = tempfile.mkstemp()
<DeepExtract>
session = db.getSession('sqlite:///%s' % filepath)
utils.loadYAML(session, './dlrn/tests/samples/commits_1.yaml')
self.session = session
</DeepExtract>
parser = argparse.ArgumentParser()
parser.add_argument('--dev', action='store_true')
parser.add_argument('--run', action='store_true')
self.options = parser.parse_args([])
self.toprocess = []
|
def setUp(self):
super(TestAddCommits, self).setUp()
(self.db_fd, filepath) = tempfile.mkstemp()
session = db.getSession('sqlite:///%s' % filepath)
utils.loadYAML(session, './dlrn/tests/samples/commits_1.yaml')
self.session = session
parser = argparse.ArgumentParser()
parser.add_argument('--dev', action='store_true')
parser.add_argument('--run', action='store_true')
self.options = parser.parse_args([])
self.toprocess = []
|
DLRN
|
positive
|
def test_reports_suppression_of_small_values():
m = Measure('ignored-id', numerator='fish', denominator='litres', small_number_suppression=True)
data = pandas.DataFrame({'fish': [1], 'litres': [100]}, index=['bowl'])
reporter = RecordingReporter()
<DeepExtract>
return m.calculate(data, reporter)
</DeepExtract>
assert 'Suppressed small numbers in column fish' in reporter.msg
|
def test_reports_suppression_of_small_values():
m = Measure('ignored-id', numerator='fish', denominator='litres', small_number_suppression=True)
data = pandas.DataFrame({'fish': [1], 'litres': [100]}, index=['bowl'])
reporter = RecordingReporter()
return m.calculate(data, reporter)
assert 'Suppressed small numbers in column fish' in reporter.msg
|
cohort-extractor
|
positive
|
@classmethod
def add_periods(cls, periods: Iterable['TimePeriodOverview'], footprint_tolerance=1000.0):
periods = [p for p in periods if p is not None and p.dataset_count > 0]
period = 'day'
crses = {p.footprint_crs for p in periods}
if not crses:
footprint_crs = None
elif len(crses) == 1:
[footprint_crs] = crses
else:
raise NotImplementedError('Time summaries use inconsistent CRSes.')
timeline_counter = Counter()
for p in periods:
timeline_counter.update(p.timeline_dataset_counts)
period = p.timeline_period
(timeline_counter, period) = cls._group_counter_if_needed(timeline_counter, period)
common_time_period = list(periods[0].period_tuple) if periods else [None] * 4
region_counter = Counter()
for time_period in periods:
region_counter.update(time_period.region_dataset_counts)
if time_period.footprint_geometry and (not time_period.footprint_geometry.is_valid):
_LOG.info('invalid_stored_geometry', summary=time_period.period_tuple)
time_period.footprint_geometry = time_period.footprint_geometry.buffer(0)
this_period = time_period.period_tuple
for (i, elem) in enumerate(common_time_period):
if elem is not None and elem != this_period[i]:
<DeepExtract>
common_time_period[i:] = [None] * (len(common_time_period) - i)
return common_time_period
</DeepExtract>
break
with_valid_geometries = [p for p in periods if p.footprint_count and p.footprint_geometry and p.footprint_geometry.is_valid and (not p.footprint_geometry.is_empty)]
<DeepExtract>
if not with_valid_geometries:
geometry_union = None
try:
geometry_union = shapely.ops.unary_union([p.footprint_geometry for p in with_valid_geometries])
except ValueError:
try:
_LOG.warning('summary.footprint.invalid_union', exc_info=True)
geometry_union = shapely.ops.unary_union([p.footprint_geometry.buffer(0.001) for p in with_valid_geometries])
except ValueError:
_LOG.warning('summary.footprint.invalid_buffered_union', exc_info=True)
polygonlist = _polygon_chain(with_valid_geometries)
filtered_geom = _filter_geom(polygonlist)
geometry_union = shapely.ops.unary_union(filtered_geom)
if footprint_tolerance is not None:
geometry_union = geometry_union.simplify(footprint_tolerance)
geometry_union = geometry_union
</DeepExtract>
total_datasets = sum((p.dataset_count for p in periods))
(product_name, year, month, day) = common_time_period
return TimePeriodOverview(product_name=product_name, year=year, month=month, day=day, dataset_count=total_datasets, timeline_dataset_counts=timeline_counter, timeline_period=period, region_dataset_counts=region_counter, time_range=Range(min((r.time_range.begin for r in periods)) if periods else None, max((r.time_range.end for r in periods)) if periods else None), footprint_geometry=geometry_union, footprint_crs=footprint_crs, footprint_count=sum((p.footprint_count for p in with_valid_geometries)), newest_dataset_creation_time=max((p.newest_dataset_creation_time for p in periods if p.newest_dataset_creation_time is not None), default=None), crses=set.union(*(o.crses for o in periods)) if periods else set(), product_refresh_time=max((p.product_refresh_time for p in periods if p.product_refresh_time is not None), default=None), summary_gen_time=min((p.summary_gen_time for p in periods if p.summary_gen_time is not None), default=None), size_bytes=sum((p.size_bytes for p in periods if p.size_bytes is not None)))
|
@classmethod
def add_periods(cls, periods: Iterable['TimePeriodOverview'], footprint_tolerance=1000.0):
periods = [p for p in periods if p is not None and p.dataset_count > 0]
period = 'day'
crses = {p.footprint_crs for p in periods}
if not crses:
footprint_crs = None
elif len(crses) == 1:
[footprint_crs] = crses
else:
raise NotImplementedError('Time summaries use inconsistent CRSes.')
timeline_counter = Counter()
for p in periods:
timeline_counter.update(p.timeline_dataset_counts)
period = p.timeline_period
(timeline_counter, period) = cls._group_counter_if_needed(timeline_counter, period)
common_time_period = list(periods[0].period_tuple) if periods else [None] * 4
region_counter = Counter()
for time_period in periods:
region_counter.update(time_period.region_dataset_counts)
if time_period.footprint_geometry and (not time_period.footprint_geometry.is_valid):
_LOG.info('invalid_stored_geometry', summary=time_period.period_tuple)
time_period.footprint_geometry = time_period.footprint_geometry.buffer(0)
this_period = time_period.period_tuple
for (i, elem) in enumerate(common_time_period):
if elem is not None and elem != this_period[i]:
common_time_period[i:] = [None] * (len(common_time_period) - i)
return common_time_period
break
with_valid_geometries = [p for p in periods if p.footprint_count and p.footprint_geometry and p.footprint_geometry.is_valid and (not p.footprint_geometry.is_empty)]
if not with_valid_geometries:
geometry_union = None
try:
geometry_union = shapely.ops.unary_union([p.footprint_geometry for p in with_valid_geometries])
except ValueError:
try:
_LOG.warning('summary.footprint.invalid_union', exc_info=True)
geometry_union = shapely.ops.unary_union([p.footprint_geometry.buffer(0.001) for p in with_valid_geometries])
except ValueError:
_LOG.warning('summary.footprint.invalid_buffered_union', exc_info=True)
polygonlist = _polygon_chain(with_valid_geometries)
filtered_geom = _filter_geom(polygonlist)
geometry_union = shapely.ops.unary_union(filtered_geom)
if footprint_tolerance is not None:
geometry_union = geometry_union.simplify(footprint_tolerance)
geometry_union = geometry_union
total_datasets = sum((p.dataset_count for p in periods))
(product_name, year, month, day) = common_time_period
return TimePeriodOverview(product_name=product_name, year=year, month=month, day=day, dataset_count=total_datasets, timeline_dataset_counts=timeline_counter, timeline_period=period, region_dataset_counts=region_counter, time_range=Range(min((r.time_range.begin for r in periods)) if periods else None, max((r.time_range.end for r in periods)) if periods else None), footprint_geometry=geometry_union, footprint_crs=footprint_crs, footprint_count=sum((p.footprint_count for p in with_valid_geometries)), newest_dataset_creation_time=max((p.newest_dataset_creation_time for p in periods if p.newest_dataset_creation_time is not None), default=None), crses=set.union(*(o.crses for o in periods)) if periods else set(), product_refresh_time=max((p.product_refresh_time for p in periods if p.product_refresh_time is not None), default=None), summary_gen_time=min((p.summary_gen_time for p in periods if p.summary_gen_time is not None), default=None), size_bytes=sum((p.size_bytes for p in periods if p.size_bytes is not None)))
|
datacube-explorer
|
positive
|
def __proceed(self):
if not self.proceed_event.is_set():
self.context['state'] = 'paused'
log.info("Paused worker thread '%s'", self.name)
self.proceed_event.wait()
if self.terminate:
return False
log.info("Resumed worker thread '%s'", self.name)
<DeepExtract>
if self.delay and self.delay > 0.0:
log.info("Delayed startup/resume of %f second(s) of worker thread '%s'...", self.delay, self.name)
self.context['state'] = 'pending'
self.wake_event.wait(self.delay)
</DeepExtract>
self.context['state'] = 'running'
if self.terminate:
return False
return True
|
def __proceed(self):
if not self.proceed_event.is_set():
self.context['state'] = 'paused'
log.info("Paused worker thread '%s'", self.name)
self.proceed_event.wait()
if self.terminate:
return False
log.info("Resumed worker thread '%s'", self.name)
if self.delay and self.delay > 0.0:
log.info("Delayed startup/resume of %f second(s) of worker thread '%s'...", self.delay, self.name)
self.context['state'] = 'pending'
self.wake_event.wait(self.delay)
self.context['state'] = 'running'
if self.terminate:
return False
return True
|
autopi-core
|
positive
|
def create_hits(self, project, tasks=None, repetition=None):
if not tasks:
cursor = connection.cursor()
query = '\n SELECT\n max(id) id,\n repetition,\n group_id,\n repetition - sum(existing_assignments) remaining_assignments,\n min_rating\n FROM (\n SELECT\n t_rev.id,\n t.group_id,\n t.min_rating,\n p.repetition,\n CASE WHEN ma.id IS NULL OR ma.status IN (%(skipped)s, %(rejected)s, %(expired)s)\n THEN 0\n ELSE 1 END existing_assignments\n FROM crowdsourcing_task t\n INNER JOIN crowdsourcing_project p ON t.project_id = p.id\n INNER JOIN crowdsourcing_task t_rev ON t_rev.group_id = t.group_id\n LEFT OUTER JOIN mturk_mturkhit mh ON mh.task_id = t_rev.id\n LEFT OUTER JOIN mturk_mturkassignment ma ON ma.hit_id = mh.id\n WHERE t.project_id = (%(project_id)s) AND t_rev.exclude_at IS NULL\n AND t_rev.deleted_at IS NULL\n ) t\n GROUP BY group_id, repetition, min_rating HAVING sum(existing_assignments) < repetition;\n '
cursor.execute(query, {'skipped': TaskWorker.STATUS_SKIPPED, 'rejected': TaskWorker.STATUS_REJECTED, 'expired': TaskWorker.STATUS_EXPIRED, 'project_id': project.id})
tasks = cursor.fetchall()
rated_workers = Rating.objects.filter(origin_type=Rating.RATING_REQUESTER).count()
add_boomerang = rated_workers > 0
duration = project.timeout if project.timeout is not None else datetime.timedelta(hours=24)
lifetime = project.deadline - timezone.now() if project.deadline is not None else datetime.timedelta(days=7)
for task in tasks:
<DeepExtract>
task_hash = Hashids(salt=settings.SECRET_KEY, min_length=settings.ID_HASH_MIN_LENGTH)
task_id = task_hash.encode(task[0])
url = self.host + '/mturk/task/?taskId=' + task_id
question = ExternalQuestion(external_url=url, frame_height=frame_height)
question = question
</DeepExtract>
mturk_hit = MTurkHIT.objects.filter(task_id=task[0]).first()
<DeepExtract>
requirements = []
if project.qualification is not None:
requirements += self._mturk_system_qualifications(project.qualification)
(boomerang_qual, success) = self.create_qualification_type(owner_id=project.owner_id, project_id=project.group_id, name='Boomerang Score #{}'.format(project.group_id), flag=FLAG_Q_BOOMERANG, description='No description available')
boomerang = None
if int(round(task[4], 2) * 100) <= int(settings.BOOMERANG_MIDPOINT * 100):
for (i, bucket) in enumerate(WAIT_LIST_BUCKETS):
if int(bucket[1] * 100) <= int(round(task[4], 2) * 100):
(boomerang_blacklist, success) = self.create_qualification_type(owner_id=project.owner_id, name='Boomerang Waitlist #{}-{}'.format(project.group_id, len(WAIT_LIST_BUCKETS) - i), flag=FLAG_Q_BOOMERANG, description='No description available', deny=True, project_id=project.group_id, bucket=bucket)
if success and add_boomerang:
boomerang = BoomerangRequirement(qualification_type_id=boomerang_blacklist.type_id, comparator=OP_DNE, integer_value=None)
requirements.append(boomerang)
else:
boomerang = BoomerangRequirement(qualification_type_id=boomerang_qual.type_id, comparator=OP_GTEQ, integer_value=int(round(task[4], 2) * 100))
if success and add_boomerang:
requirements.append(boomerang)
(qualifications, boomerang_qual) = (Qualifications(requirements), boomerang_qual)
</DeepExtract>
qualifications_mask = 0
if qualifications is not None:
qualifications_mask = FLAG_Q_LOCALE + FLAG_Q_HITS + FLAG_Q_RATE + FLAG_Q_BOOMERANG
<DeepExtract>
hit_type = MTurkHITType.objects.filter(owner_id=project.owner_id, name=project.name, description=self.description, price=Decimal(str(project.price)), duration=duration, qualifications_mask=qualifications_mask, boomerang_threshold=int(round(task[4], 2) * 100)).first()
if hit_type is not None:
(hit_type, success) = (hit_type, True)
reward = Price(project.price)
try:
mturk_ht = self.connection.register_hit_type(title=project.name, description=self.description, reward=reward, duration=duration, keywords=self.keywords, approval_delay=datetime.timedelta(days=2), qual_req=qualifications)[0]
hit_type = MTurkHITType(owner_id=project.owner_id, name=project.name, description=self.description, price=Decimal(str(project.price)), keywords=self.keywords, duration=duration, qualifications_mask=qualifications_mask, boomerang_qualification=boomerang_qual, boomerang_threshold=int(round(task[4], 2) * 100))
hit_type.string_id = mturk_ht.HITTypeId
hit_type.save()
except MTurkRequestError:
(hit_type, success) = (None, False)
(hit_type, success) = (hit_type, True)
</DeepExtract>
if not success:
return 'FAILURE'
if mturk_hit is None:
try:
hit = self.connection.create_hit(hit_type=hit_type.string_id, max_assignments=task[3], lifetime=lifetime, question=question)[0]
<DeepExtract>
self.connection.set_rest_notification(hit_type=hit.HITTypeId, url=self.host + '/api/mturk/notification', event_types=['AssignmentReturned', 'AssignmentAbandoned', 'AssignmentAccepted', 'AssignmentSubmitted'])
</DeepExtract>
mturk_hit = MTurkHIT(hit_id=hit.HITId, hit_type=hit_type, task_id=task[0])
except MTurkRequestError as e:
error = e.errors[0][0]
if error == 'AWS.MechanicalTurk.InsufficientFunds':
message = {'type': 'ERROR', 'detail': 'Insufficient funds on your Mechanical Turk account!', 'code': error}
redis_publisher = RedisPublisher(facility='bot', users=[project.owner])
message = RedisMessage(json.dumps(message))
redis_publisher.publish_message(message)
return 'FAILED'
elif mturk_hit.hit_type_id != hit_type.id:
<DeepExtract>
try:
result = self.connection.change_hit_type_of_hit(hit_id=mturk_hit.hit_id, hit_type=hit_type.string_id)
except MTurkRequestError:
(result, success) = (None, False)
(result, success) = (result, True)
</DeepExtract>
if success:
mturk_hit.hit_type = hit_type
mturk_hit.save()
return 'SUCCESS'
|
def create_hits(self, project, tasks=None, repetition=None):
if not tasks:
cursor = connection.cursor()
query = '\n SELECT\n max(id) id,\n repetition,\n group_id,\n repetition - sum(existing_assignments) remaining_assignments,\n min_rating\n FROM (\n SELECT\n t_rev.id,\n t.group_id,\n t.min_rating,\n p.repetition,\n CASE WHEN ma.id IS NULL OR ma.status IN (%(skipped)s, %(rejected)s, %(expired)s)\n THEN 0\n ELSE 1 END existing_assignments\n FROM crowdsourcing_task t\n INNER JOIN crowdsourcing_project p ON t.project_id = p.id\n INNER JOIN crowdsourcing_task t_rev ON t_rev.group_id = t.group_id\n LEFT OUTER JOIN mturk_mturkhit mh ON mh.task_id = t_rev.id\n LEFT OUTER JOIN mturk_mturkassignment ma ON ma.hit_id = mh.id\n WHERE t.project_id = (%(project_id)s) AND t_rev.exclude_at IS NULL\n AND t_rev.deleted_at IS NULL\n ) t\n GROUP BY group_id, repetition, min_rating HAVING sum(existing_assignments) < repetition;\n '
cursor.execute(query, {'skipped': TaskWorker.STATUS_SKIPPED, 'rejected': TaskWorker.STATUS_REJECTED, 'expired': TaskWorker.STATUS_EXPIRED, 'project_id': project.id})
tasks = cursor.fetchall()
rated_workers = Rating.objects.filter(origin_type=Rating.RATING_REQUESTER).count()
add_boomerang = rated_workers > 0
duration = project.timeout if project.timeout is not None else datetime.timedelta(hours=24)
lifetime = project.deadline - timezone.now() if project.deadline is not None else datetime.timedelta(days=7)
for task in tasks:
task_hash = Hashids(salt=settings.SECRET_KEY, min_length=settings.ID_HASH_MIN_LENGTH)
task_id = task_hash.encode(task[0])
url = self.host + '/mturk/task/?taskId=' + task_id
question = ExternalQuestion(external_url=url, frame_height=frame_height)
question = question
mturk_hit = MTurkHIT.objects.filter(task_id=task[0]).first()
requirements = []
if project.qualification is not None:
requirements += self._mturk_system_qualifications(project.qualification)
(boomerang_qual, success) = self.create_qualification_type(owner_id=project.owner_id, project_id=project.group_id, name='Boomerang Score #{}'.format(project.group_id), flag=FLAG_Q_BOOMERANG, description='No description available')
boomerang = None
if int(round(task[4], 2) * 100) <= int(settings.BOOMERANG_MIDPOINT * 100):
for (i, bucket) in enumerate(WAIT_LIST_BUCKETS):
if int(bucket[1] * 100) <= int(round(task[4], 2) * 100):
(boomerang_blacklist, success) = self.create_qualification_type(owner_id=project.owner_id, name='Boomerang Waitlist #{}-{}'.format(project.group_id, len(WAIT_LIST_BUCKETS) - i), flag=FLAG_Q_BOOMERANG, description='No description available', deny=True, project_id=project.group_id, bucket=bucket)
if success and add_boomerang:
boomerang = BoomerangRequirement(qualification_type_id=boomerang_blacklist.type_id, comparator=OP_DNE, integer_value=None)
requirements.append(boomerang)
else:
boomerang = BoomerangRequirement(qualification_type_id=boomerang_qual.type_id, comparator=OP_GTEQ, integer_value=int(round(task[4], 2) * 100))
if success and add_boomerang:
requirements.append(boomerang)
(qualifications, boomerang_qual) = (Qualifications(requirements), boomerang_qual)
qualifications_mask = 0
if qualifications is not None:
qualifications_mask = FLAG_Q_LOCALE + FLAG_Q_HITS + FLAG_Q_RATE + FLAG_Q_BOOMERANG
hit_type = MTurkHITType.objects.filter(owner_id=project.owner_id, name=project.name, description=self.description, price=Decimal(str(project.price)), duration=duration, qualifications_mask=qualifications_mask, boomerang_threshold=int(round(task[4], 2) * 100)).first()
if hit_type is not None:
(hit_type, success) = (hit_type, True)
reward = Price(project.price)
try:
mturk_ht = self.connection.register_hit_type(title=project.name, description=self.description, reward=reward, duration=duration, keywords=self.keywords, approval_delay=datetime.timedelta(days=2), qual_req=qualifications)[0]
hit_type = MTurkHITType(owner_id=project.owner_id, name=project.name, description=self.description, price=Decimal(str(project.price)), keywords=self.keywords, duration=duration, qualifications_mask=qualifications_mask, boomerang_qualification=boomerang_qual, boomerang_threshold=int(round(task[4], 2) * 100))
hit_type.string_id = mturk_ht.HITTypeId
hit_type.save()
except MTurkRequestError:
(hit_type, success) = (None, False)
(hit_type, success) = (hit_type, True)
if not success:
return 'FAILURE'
if mturk_hit is None:
try:
hit = self.connection.create_hit(hit_type=hit_type.string_id, max_assignments=task[3], lifetime=lifetime, question=question)[0]
self.connection.set_rest_notification(hit_type=hit.HITTypeId, url=self.host + '/api/mturk/notification', event_types=['AssignmentReturned', 'AssignmentAbandoned', 'AssignmentAccepted', 'AssignmentSubmitted'])
mturk_hit = MTurkHIT(hit_id=hit.HITId, hit_type=hit_type, task_id=task[0])
except MTurkRequestError as e:
error = e.errors[0][0]
if error == 'AWS.MechanicalTurk.InsufficientFunds':
message = {'type': 'ERROR', 'detail': 'Insufficient funds on your Mechanical Turk account!', 'code': error}
redis_publisher = RedisPublisher(facility='bot', users=[project.owner])
message = RedisMessage(json.dumps(message))
redis_publisher.publish_message(message)
return 'FAILED'
elif mturk_hit.hit_type_id != hit_type.id:
try:
result = self.connection.change_hit_type_of_hit(hit_id=mturk_hit.hit_id, hit_type=hit_type.string_id)
except MTurkRequestError:
(result, success) = (None, False)
(result, success) = (result, True)
if success:
mturk_hit.hit_type = hit_type
mturk_hit.save()
return 'SUCCESS'
|
daemo
|
positive
|
def run_submission(module, test_models, test_benchmarks, submission_entry):
<DeepExtract>
ml_brain_pool = {}
if submission_entry.model_type == 'BaseModel':
logger.info(f'Start working with base models')
layers = {}
base_model_pool = {}
for model in test_models:
function = lambda model_inst=model.name: module.get_model(model_inst)
base_model_pool[model.name] = LazyLoad(function)
try:
layers[model.name] = module.get_layers(model.name)
except Exception as e:
logging.warning(f'Could not retrieve layer for model {model.name} -- skipping model {model} ({e})')
model_layers = ModelLayers(layers)
ml_brain_pool = MLBrainPool(base_model_pool, model_layers)
else:
logger.info(f'Start working with brain models')
for model in test_models:
ml_brain_pool[model.name] = module.get_model(model.name)
ml_brain_pool = ml_brain_pool
</DeepExtract>
data = []
success = True
try:
for model_entry in test_models:
model_id = model_entry.name
for benchmark_name in test_benchmarks:
score_entry = None
try:
start = datetime.datetime.now()
<DeepExtract>
benchmark = benchmark_pool[benchmark_name]
(benchmark_type, created) = BenchmarkType.get_or_create(identifier=benchmark_name, defaults=dict(order=999))
if created:
try:
parent = BenchmarkType.get(identifier=benchmark.parent)
benchmark_type.parent = parent
benchmark_type.save()
except DoesNotExist:
logger.warning(f'Could not connect benchmark {benchmark_name} to parent {benchmark.parent} since parent does not exist')
if hasattr(benchmark, 'bibtex') and benchmark.bibtex is not None:
bibtex_string = benchmark.bibtex
ref = get_reference(bibtex_string)
if ref:
benchmark_type.reference = ref
benchmark_type.save()
(bench_inst, created) = BenchmarkInstance.get_or_create(benchmark=benchmark_type, version=benchmark.version)
if created:
ceiling = benchmark.ceiling
bench_inst.ceiling = ceiling.sel(aggregation='center')
bench_inst.ceiling_error = ceiling.sel(aggregation='error')
bench_inst.save()
benchmark_entry = bench_inst
</DeepExtract>
(score_entry, created) = Score.get_or_create(benchmark=benchmark_entry, model=model_entry, defaults={'start_timestamp': start})
if not created and score_entry.score_raw is not None:
logger.warning(f'A score for model {model_id} and benchmark {benchmark_name} already exists')
raw = score_entry.score_raw
ceiled = score_entry.score_ceiled
error = score_entry.error
finished = score_entry.end_timestamp
comment = score_entry.comment
else:
if not created:
score_entry.start_timestamp = datetime.datetime.now()
score_entry.comment = None
logger.warning('An entry already exists but was not evaluated successful, we rerun!')
logger.info(f'Scoring {model_id}, id {model_entry.id} on benchmark {benchmark_name}')
model = ml_brain_pool[model_id]
if not model_entry.visual_degrees:
model_entry.visual_degrees = model.visual_degrees()
model_entry.save()
score = score_model(model_id, benchmark_name, model)
logger.info(f'Running benchmark {benchmark_name} on model {model_id} (id {model_entry.id}) produced this score: {score}')
if not hasattr(score, 'ceiling'):
raw = score.sel(aggregation='center').item(0)
ceiled = None
error = None
else:
assert score.raw.sel(aggregation='center') is not None
raw = score.raw.sel(aggregation='center').item(0)
ceiled = score.sel(aggregation='center').item(0)
error = score.sel(aggregation='error').item(0)
finished = datetime.datetime.now()
comment = f'layers: {model.layer_model.region_layer_map}' if submission_entry.model_type == 'BaseModel' else ''
score_entry.end_timestamp = finished
score_entry.error = error
score_entry.score_ceiled = ceiled
score_entry.score_raw = raw
score_entry.comment = comment
score_entry.save()
result = {'Model': model_id, 'Benchmark': benchmark_name, 'raw_result': raw, 'ceiled_result': ceiled, 'error': error, 'finished_time': finished, 'comment': comment}
data.append(result)
except Exception as e:
success = False
error = f'Benchmark {benchmark_name} failed for model {model_id} because of this error: {e}'
logging.error(f'Could not run model {model_id} because of following error')
logging.error(e, exc_info=True)
data.append({'Model': model_id, 'Benchmark': benchmark_name, 'raw_result': 0, 'ceiled_result': 0, 'error': error, 'finished_time': datetime.datetime.now()})
if score_entry:
score_entry.comment = error if len(error) <= SCORE_COMMENT_MAX_LENGTH else error[:int(SCORE_COMMENT_MAX_LENGTH / 2) - 5] + ' [...] ' + error[-int(SCORE_COMMENT_MAX_LENGTH / 2) + 5:]
score_entry.save()
finally:
if success:
submission_entry.status = 'successful'
logger.info(f'Submission is stored as successful')
else:
submission_entry.status = 'failure'
logger.info(f'Submission was not entirely successful (some benchmarks could not be executed)')
submission_entry.save()
return data
|
def run_submission(module, test_models, test_benchmarks, submission_entry):
ml_brain_pool = {}
if submission_entry.model_type == 'BaseModel':
logger.info(f'Start working with base models')
layers = {}
base_model_pool = {}
for model in test_models:
function = lambda model_inst=model.name: module.get_model(model_inst)
base_model_pool[model.name] = LazyLoad(function)
try:
layers[model.name] = module.get_layers(model.name)
except Exception as e:
logging.warning(f'Could not retrieve layer for model {model.name} -- skipping model {model} ({e})')
model_layers = ModelLayers(layers)
ml_brain_pool = MLBrainPool(base_model_pool, model_layers)
else:
logger.info(f'Start working with brain models')
for model in test_models:
ml_brain_pool[model.name] = module.get_model(model.name)
ml_brain_pool = ml_brain_pool
data = []
success = True
try:
for model_entry in test_models:
model_id = model_entry.name
for benchmark_name in test_benchmarks:
score_entry = None
try:
start = datetime.datetime.now()
benchmark = benchmark_pool[benchmark_name]
(benchmark_type, created) = BenchmarkType.get_or_create(identifier=benchmark_name, defaults=dict(order=999))
if created:
try:
parent = BenchmarkType.get(identifier=benchmark.parent)
benchmark_type.parent = parent
benchmark_type.save()
except DoesNotExist:
logger.warning(f'Could not connect benchmark {benchmark_name} to parent {benchmark.parent} since parent does not exist')
if hasattr(benchmark, 'bibtex') and benchmark.bibtex is not None:
bibtex_string = benchmark.bibtex
ref = get_reference(bibtex_string)
if ref:
benchmark_type.reference = ref
benchmark_type.save()
(bench_inst, created) = BenchmarkInstance.get_or_create(benchmark=benchmark_type, version=benchmark.version)
if created:
ceiling = benchmark.ceiling
bench_inst.ceiling = ceiling.sel(aggregation='center')
bench_inst.ceiling_error = ceiling.sel(aggregation='error')
bench_inst.save()
benchmark_entry = bench_inst
(score_entry, created) = Score.get_or_create(benchmark=benchmark_entry, model=model_entry, defaults={'start_timestamp': start})
if not created and score_entry.score_raw is not None:
logger.warning(f'A score for model {model_id} and benchmark {benchmark_name} already exists')
raw = score_entry.score_raw
ceiled = score_entry.score_ceiled
error = score_entry.error
finished = score_entry.end_timestamp
comment = score_entry.comment
else:
if not created:
score_entry.start_timestamp = datetime.datetime.now()
score_entry.comment = None
logger.warning('An entry already exists but was not evaluated successful, we rerun!')
logger.info(f'Scoring {model_id}, id {model_entry.id} on benchmark {benchmark_name}')
model = ml_brain_pool[model_id]
if not model_entry.visual_degrees:
model_entry.visual_degrees = model.visual_degrees()
model_entry.save()
score = score_model(model_id, benchmark_name, model)
logger.info(f'Running benchmark {benchmark_name} on model {model_id} (id {model_entry.id}) produced this score: {score}')
if not hasattr(score, 'ceiling'):
raw = score.sel(aggregation='center').item(0)
ceiled = None
error = None
else:
assert score.raw.sel(aggregation='center') is not None
raw = score.raw.sel(aggregation='center').item(0)
ceiled = score.sel(aggregation='center').item(0)
error = score.sel(aggregation='error').item(0)
finished = datetime.datetime.now()
comment = f'layers: {model.layer_model.region_layer_map}' if submission_entry.model_type == 'BaseModel' else ''
score_entry.end_timestamp = finished
score_entry.error = error
score_entry.score_ceiled = ceiled
score_entry.score_raw = raw
score_entry.comment = comment
score_entry.save()
result = {'Model': model_id, 'Benchmark': benchmark_name, 'raw_result': raw, 'ceiled_result': ceiled, 'error': error, 'finished_time': finished, 'comment': comment}
data.append(result)
except Exception as e:
success = False
error = f'Benchmark {benchmark_name} failed for model {model_id} because of this error: {e}'
logging.error(f'Could not run model {model_id} because of following error')
logging.error(e, exc_info=True)
data.append({'Model': model_id, 'Benchmark': benchmark_name, 'raw_result': 0, 'ceiled_result': 0, 'error': error, 'finished_time': datetime.datetime.now()})
if score_entry:
score_entry.comment = error if len(error) <= SCORE_COMMENT_MAX_LENGTH else error[:int(SCORE_COMMENT_MAX_LENGTH / 2) - 5] + ' [...] ' + error[-int(SCORE_COMMENT_MAX_LENGTH / 2) + 5:]
score_entry.save()
finally:
if success:
submission_entry.status = 'successful'
logger.info(f'Submission is stored as successful')
else:
submission_entry.status = 'failure'
logger.info(f'Submission was not entirely successful (some benchmarks could not be executed)')
submission_entry.save()
return data
|
brain-score
|
positive
|
def apply_tfa_magseries(lcfile, timecol, magcol, errcol, templateinfo, mintemplatedist_arcmin=10.0, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0):
"""This applies the TFA correction to an LC given TFA template information.
Parameters
----------
lcfile : str
This is the light curve file to apply the TFA correction to.
timecol,magcol,errcol : str
These are the column keys in the lcdict for the LC file to apply the TFA
correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming this light curve to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to this light curve before running TFA
on it.
Returns
-------
str
This returns the filename of the light curve file generated after TFA
applications. This is a pickle (that can be read by `lcproc.read_pklc`)
in the same directory as `lcfile`. The `magcol` will be encoded in the
filename, so each `magcol` in `lcfile` gets its own output file.
"""
try:
formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
if isinstance(templateinfo, str) and os.path.exists(templateinfo):
with open(templateinfo, 'rb') as infd:
templateinfo = pickle.load(infd)
lcdict = readerfunc(lcfile)
if isinstance(lcdict, (tuple, list)) and isinstance(lcdict[0], dict):
lcdict = lcdict[0]
objectid = lcdict['objectid']
tmagseries = templateinfo[magcol]['template_magseries'][:]
if objectid in templateinfo[magcol]['template_objects']:
LOGWARNING('object %s found in the TFA template ensemble, removing...' % objectid)
templateind = templateinfo[magcol]['template_objects'] == objectid
tmagseries = tmagseries[~templateind, :]
object_matches = coordutils.conesearch_kdtree(templateinfo[magcol]['template_radecl_kdtree'], lcdict['objectinfo']['ra'], lcdict['objectinfo']['decl'], mintemplatedist_arcmin / 60.0)
if len(object_matches) > 0:
LOGWARNING('object %s is within %.1f arcminutes of %s template objects. Will remove these objects from the template applied to this object.' % (objectid, mintemplatedist_arcmin, len(object_matches)))
removalind = np.full(templateinfo[magcol]['template_objects'].size, False, dtype=np.bool)
removalind[np.array(object_matches)] = True
tmagseries = tmagseries[~removalind, :]
normal_matrix = np.dot(tmagseries, tmagseries.T)
normal_matrix_inverse = spla.pinv2(normal_matrix)
timebase = templateinfo[magcol]['timebase']
<DeepExtract>
try:
(lcfile, lcformat, lcformatdir, tcol, mcol, ecol, timebase, interpolate_type, sigclip) = (lcfile, lcformat, lcformatdir, timecol, magcol, errcol, timebase, interp, sigclip)
try:
formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
reformed_targetlc = None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
reformed_targetlc = None
lcdict = readerfunc(lcfile)
if isinstance(lcdict, (list, tuple)) and isinstance(lcdict[0], dict):
lcdict = lcdict[0]
outdict = {}
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
if normfunc is None:
(ntimes, nmags) = normalize_magseries(times, mags, magsarefluxes=magsarefluxes)
(times, mags, errs) = (ntimes, nmags, errs)
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, sigclip=sigclip)
mags_interpolator = spi.interp1d(stimes, smags, kind=interpolate_type, fill_value='extrapolate')
errs_interpolator = spi.interp1d(stimes, serrs, kind=interpolate_type, fill_value='extrapolate')
interpolated_mags = mags_interpolator(timebase)
interpolated_errs = errs_interpolator(timebase)
magmedian = np.median(interpolated_mags)
renormed_mags = interpolated_mags - magmedian
outdict = {'mags': renormed_mags, 'errs': interpolated_errs, 'origmags': interpolated_mags}
reformed_targetlc = outdict
except Exception:
LOGEXCEPTION('reform LC task failed: %s' % repr((lcfile, lcformat, lcformatdir, timecol, magcol, errcol, timebase, interp, sigclip)))
reformed_targetlc = None
</DeepExtract>
scalar_products = np.dot(tmagseries, reformed_targetlc['mags'])
corrections = np.dot(normal_matrix_inverse, scalar_products)
corrected_magseries = reformed_targetlc['origmags'] - np.dot(tmagseries.T, corrections)
outdict = {'times': timebase, 'mags': corrected_magseries, 'errs': reformed_targetlc['errs'], 'mags_median': np.median(corrected_magseries), 'mags_mad': np.median(np.abs(corrected_magseries - np.median(corrected_magseries))), 'work': {'tmagseries': tmagseries, 'normal_matrix': normal_matrix, 'normal_matrix_inverse': normal_matrix_inverse, 'scalar_products': scalar_products, 'corrections': corrections, 'reformed_targetlc': reformed_targetlc}}
lcdict['tfa'] = outdict
outfile = os.path.join(os.path.dirname(lcfile), '%s-tfa-%s-pklc.pkl' % (squeeze(objectid).replace(' ', '-'), magcol))
with open(outfile, 'wb') as outfd:
pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
|
def apply_tfa_magseries(lcfile, timecol, magcol, errcol, templateinfo, mintemplatedist_arcmin=10.0, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0):
"""This applies the TFA correction to an LC given TFA template information.
Parameters
----------
lcfile : str
This is the light curve file to apply the TFA correction to.
timecol,magcol,errcol : str
These are the column keys in the lcdict for the LC file to apply the TFA
correction to.
templateinfo : dict or str
This is either the dict produced by `tfa_templates_lclist` or the pickle
produced by the same function.
mintemplatedist_arcmin : float
This sets the minimum distance required from the target object for
objects in the TFA template ensemble. Objects closer than this distance
will be removed from the ensemble.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory when you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
interp : str
This is passed to scipy.interpolate.interp1d as the kind of
interpolation to use when reforming this light curve to the timebase of
the TFA templates.
sigclip : float or sequence of two floats or None
This is the sigma clip to apply to this light curve before running TFA
on it.
Returns
-------
str
This returns the filename of the light curve file generated after TFA
applications. This is a pickle (that can be read by `lcproc.read_pklc`)
in the same directory as `lcfile`. The `magcol` will be encoded in the
filename, so each `magcol` in `lcfile` gets its own output file.
"""
try:
formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
return None
if isinstance(templateinfo, str) and os.path.exists(templateinfo):
with open(templateinfo, 'rb') as infd:
templateinfo = pickle.load(infd)
lcdict = readerfunc(lcfile)
if isinstance(lcdict, (tuple, list)) and isinstance(lcdict[0], dict):
lcdict = lcdict[0]
objectid = lcdict['objectid']
tmagseries = templateinfo[magcol]['template_magseries'][:]
if objectid in templateinfo[magcol]['template_objects']:
LOGWARNING('object %s found in the TFA template ensemble, removing...' % objectid)
templateind = templateinfo[magcol]['template_objects'] == objectid
tmagseries = tmagseries[~templateind, :]
object_matches = coordutils.conesearch_kdtree(templateinfo[magcol]['template_radecl_kdtree'], lcdict['objectinfo']['ra'], lcdict['objectinfo']['decl'], mintemplatedist_arcmin / 60.0)
if len(object_matches) > 0:
LOGWARNING('object %s is within %.1f arcminutes of %s template objects. Will remove these objects from the template applied to this object.' % (objectid, mintemplatedist_arcmin, len(object_matches)))
removalind = np.full(templateinfo[magcol]['template_objects'].size, False, dtype=np.bool)
removalind[np.array(object_matches)] = True
tmagseries = tmagseries[~removalind, :]
normal_matrix = np.dot(tmagseries, tmagseries.T)
normal_matrix_inverse = spla.pinv2(normal_matrix)
timebase = templateinfo[magcol]['timebase']
try:
(lcfile, lcformat, lcformatdir, tcol, mcol, ecol, timebase, interpolate_type, sigclip) = (lcfile, lcformat, lcformatdir, timecol, magcol, errcol, timebase, interp, sigclip)
try:
formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
reformed_targetlc = None
except Exception:
LOGEXCEPTION("can't figure out the light curve format")
reformed_targetlc = None
lcdict = readerfunc(lcfile)
if isinstance(lcdict, (list, tuple)) and isinstance(lcdict[0], dict):
lcdict = lcdict[0]
outdict = {}
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
if normfunc is None:
(ntimes, nmags) = normalize_magseries(times, mags, magsarefluxes=magsarefluxes)
(times, mags, errs) = (ntimes, nmags, errs)
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, sigclip=sigclip)
mags_interpolator = spi.interp1d(stimes, smags, kind=interpolate_type, fill_value='extrapolate')
errs_interpolator = spi.interp1d(stimes, serrs, kind=interpolate_type, fill_value='extrapolate')
interpolated_mags = mags_interpolator(timebase)
interpolated_errs = errs_interpolator(timebase)
magmedian = np.median(interpolated_mags)
renormed_mags = interpolated_mags - magmedian
outdict = {'mags': renormed_mags, 'errs': interpolated_errs, 'origmags': interpolated_mags}
reformed_targetlc = outdict
except Exception:
LOGEXCEPTION('reform LC task failed: %s' % repr((lcfile, lcformat, lcformatdir, timecol, magcol, errcol, timebase, interp, sigclip)))
reformed_targetlc = None
scalar_products = np.dot(tmagseries, reformed_targetlc['mags'])
corrections = np.dot(normal_matrix_inverse, scalar_products)
corrected_magseries = reformed_targetlc['origmags'] - np.dot(tmagseries.T, corrections)
outdict = {'times': timebase, 'mags': corrected_magseries, 'errs': reformed_targetlc['errs'], 'mags_median': np.median(corrected_magseries), 'mags_mad': np.median(np.abs(corrected_magseries - np.median(corrected_magseries))), 'work': {'tmagseries': tmagseries, 'normal_matrix': normal_matrix, 'normal_matrix_inverse': normal_matrix_inverse, 'scalar_products': scalar_products, 'corrections': corrections, 'reformed_targetlc': reformed_targetlc}}
lcdict['tfa'] = outdict
outfile = os.path.join(os.path.dirname(lcfile), '%s-tfa-%s-pklc.pkl' % (squeeze(objectid).replace(' ', '-'), magcol))
with open(outfile, 'wb') as outfd:
pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)
return outfile
|
astrobase
|
positive
|
def _doInstall(self):
<DeepExtract>
(url, md5) = self._getUrl()
ball = os.path.join(self.__downloadDir, url)
</DeepExtract>
if cautils.is_tarfile(ball):
if not self.__cygwinPlatform:
<DeepExtract>
tf = cautils.open_tarfile(ball, self.__dosXz)
members = tf.getmembers()
tempdir = tempfile.mkdtemp()
try:
tf.extractall(tempdir)
for m in members:
if m.isdir():
path = self.__pm.mapPath('/' + m.name)
if not os.path.exists(path):
os.makedirs(path, m.mode)
for m in members:
if m.isdir():
path = self.__pm.mapPath('/' + m.name)
if not os.path.exists(path):
os.makedirs(path, m.mode)
else:
path = self.__pm.mapPath('/' + m.name)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.exists(path):
os.chmod(path, 511)
os.remove(path)
if m.issym() and self.__lnExists:
link_target = m.linkname
Process([self.__dosLn, '-s', link_target, path]).run(True)
elif m.islnk() and self.__lnExists:
link_target = m.linkname
mapped_target = self.__pm.mapPath('/' + m.linkname)
if not os.path.exists(mapped_target):
shutil.move(os.path.join(tempdir, link_target), mapped_target)
Process([self.__dosLn, mapped_target, path]).run(True)
else:
shutil.move(os.path.join(tempdir, m.name), path)
finally:
tf.close()
cautils.rmtree(tempdir)
</DeepExtract>
tf = cautils.open_tarfile(ball, self.__dosXz)
if self.__cygwinPlatform:
tf.extractall(self.__absRoot)
members = tf.getmembers()
tf.close()
lst = []
for m in members:
if m.isdir() and (not m.name.endswith('/')):
lst.append(m.name + '/')
else:
lst.append(m.name)
else:
print('{0}: bad tarball {1}. Install failed.'.format(self.__appName, ball), file=sys.stderr)
return
<DeepExtract>
gz_filename = os.path.join(self.__setupDir, '{0}.lst.gz'.format(self.__pkgName))
lst_cr = [x + '\n' for x in lst]
lst_io = io.BytesIO()
lst_io_gz = gzip.GzipFile(fileobj=lst_io, mode='w')
lst_io_gz.writelines([x.encode() for x in lst_cr])
lst_io_gz.close()
lst_gz = open(gz_filename, 'wb')
lst_gz.write(lst_io.getvalue())
lst_gz.close()
lst_io.close()
stat_struct = os.stat(self.__setupIniPath)
atime = stat_struct[7]
mtime = stat_struct[8]
self._touch(gz_filename, (atime, mtime))
</DeepExtract>
status = 1
if not self.__pkgName in self._integrityControl():
status = 0
self.__installed[status][self.__pkgName] = os.path.basename(ball)
<DeepExtract>
file_db = open(self.__installedDbFile, 'w')
file_db.write(self.INSTALLED_DB_MAGIC)
lines = []
for x in list(self.__installed[0].keys()):
lines.append('{0} {1} 0\n'.format(x, self.__installed[0][x]))
file_db.writelines(lines)
if file_db.close():
raise IOError(self.__installedDbFile)
</DeepExtract>
|
def _doInstall(self):
(url, md5) = self._getUrl()
ball = os.path.join(self.__downloadDir, url)
if cautils.is_tarfile(ball):
if not self.__cygwinPlatform:
tf = cautils.open_tarfile(ball, self.__dosXz)
members = tf.getmembers()
tempdir = tempfile.mkdtemp()
try:
tf.extractall(tempdir)
for m in members:
if m.isdir():
path = self.__pm.mapPath('/' + m.name)
if not os.path.exists(path):
os.makedirs(path, m.mode)
for m in members:
if m.isdir():
path = self.__pm.mapPath('/' + m.name)
if not os.path.exists(path):
os.makedirs(path, m.mode)
else:
path = self.__pm.mapPath('/' + m.name)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.exists(path):
os.chmod(path, 511)
os.remove(path)
if m.issym() and self.__lnExists:
link_target = m.linkname
Process([self.__dosLn, '-s', link_target, path]).run(True)
elif m.islnk() and self.__lnExists:
link_target = m.linkname
mapped_target = self.__pm.mapPath('/' + m.linkname)
if not os.path.exists(mapped_target):
shutil.move(os.path.join(tempdir, link_target), mapped_target)
Process([self.__dosLn, mapped_target, path]).run(True)
else:
shutil.move(os.path.join(tempdir, m.name), path)
finally:
tf.close()
cautils.rmtree(tempdir)
tf = cautils.open_tarfile(ball, self.__dosXz)
if self.__cygwinPlatform:
tf.extractall(self.__absRoot)
members = tf.getmembers()
tf.close()
lst = []
for m in members:
if m.isdir() and (not m.name.endswith('/')):
lst.append(m.name + '/')
else:
lst.append(m.name)
else:
print('{0}: bad tarball {1}. Install failed.'.format(self.__appName, ball), file=sys.stderr)
return
gz_filename = os.path.join(self.__setupDir, '{0}.lst.gz'.format(self.__pkgName))
lst_cr = [x + '\n' for x in lst]
lst_io = io.BytesIO()
lst_io_gz = gzip.GzipFile(fileobj=lst_io, mode='w')
lst_io_gz.writelines([x.encode() for x in lst_cr])
lst_io_gz.close()
lst_gz = open(gz_filename, 'wb')
lst_gz.write(lst_io.getvalue())
lst_gz.close()
lst_io.close()
stat_struct = os.stat(self.__setupIniPath)
atime = stat_struct[7]
mtime = stat_struct[8]
self._touch(gz_filename, (atime, mtime))
status = 1
if not self.__pkgName in self._integrityControl():
status = 0
self.__installed[status][self.__pkgName] = os.path.basename(ball)
file_db = open(self.__installedDbFile, 'w')
file_db.write(self.INSTALLED_DB_MAGIC)
lines = []
for x in list(self.__installed[0].keys()):
lines.append('{0} {1} 0\n'.format(x, self.__installed[0][x]))
file_db.writelines(lines)
if file_db.close():
raise IOError(self.__installedDbFile)
|
cyg-apt
|
positive
|
def upload_ballots(self, election_id: str) -> dict[str, Any]:
try:
<DeepExtract>
eel.update_upload_status('Scanning drives')
</DeepExtract>
<DeepExtract>
try:
removable_drives = get_removable_drives()
self._log.trace(f'found {len(removable_drives)} removable drives')
candidate_drives = [self.parse_drive(drive) for drive in removable_drives if os.path.exists(os.path.join(drive, 'artifacts', 'encrypted_ballots')) and os.path.exists(os.path.join(drive, 'artifacts', 'devices'))]
first_candidate = next(iter(candidate_drives), None)
drive_info = eel_success(first_candidate)
except Exception as e:
drive_info = self.handle_error(e)
</DeepExtract>
device_file_name = drive_info['result']['device_file_name']
device_file_path = drive_info['result']['device_file_path']
self._log.debug(f'uploading ballots for {election_id} from {device_file_path} device {device_file_name}')
<DeepExtract>
eel.update_upload_status('Uploading device file')
</DeepExtract>
<DeepExtract>
with open(device_file_path, 'r', encoding='utf-8') as device_file:
ballot_upload = self.create_ballot_upload(election_id, device_file_name, device_file.read())
ballot_upload_result = ballot_upload
</DeepExtract>
if not ballot_upload_result['success']:
return ballot_upload_result
ballots_dir: str = drive_info['result']['ballots_dir']
ballot_files = os.listdir(ballots_dir)
ballot_upload_id: str = ballot_upload_result['result']
ballot_num = 1
duplicate_count = 0
ballot_count = len(ballot_files)
for ballot_file in ballot_files:
self._log.debug('uploading ballot ' + ballot_file)
<DeepExtract>
eel.update_upload_status(f'Uploading ballot {ballot_num}/{ballot_count}')
</DeepExtract>
<DeepExtract>
ballot_file_path = os.path.join(ballots_dir, ballot_file)
with open(ballot_file_path, 'r', encoding='utf-8') as ballot_file:
ballot_contents = ballot_file.read()
result = self.upload_ballot(ballot_upload_id, election_id, ballot_file, ballot_contents)
</DeepExtract>
if not result['success']:
return result
if result['result']['is_duplicate']:
duplicate_count += 1
ballot_num += 1
return eel_success({'ballot_count': ballot_count, 'duplicate_count': duplicate_count})
except Exception as e:
return self.handle_error(e)
|
def upload_ballots(self, election_id: str) -> dict[str, Any]:
try:
eel.update_upload_status('Scanning drives')
try:
removable_drives = get_removable_drives()
self._log.trace(f'found {len(removable_drives)} removable drives')
candidate_drives = [self.parse_drive(drive) for drive in removable_drives if os.path.exists(os.path.join(drive, 'artifacts', 'encrypted_ballots')) and os.path.exists(os.path.join(drive, 'artifacts', 'devices'))]
first_candidate = next(iter(candidate_drives), None)
drive_info = eel_success(first_candidate)
except Exception as e:
drive_info = self.handle_error(e)
device_file_name = drive_info['result']['device_file_name']
device_file_path = drive_info['result']['device_file_path']
self._log.debug(f'uploading ballots for {election_id} from {device_file_path} device {device_file_name}')
eel.update_upload_status('Uploading device file')
with open(device_file_path, 'r', encoding='utf-8') as device_file:
ballot_upload = self.create_ballot_upload(election_id, device_file_name, device_file.read())
ballot_upload_result = ballot_upload
if not ballot_upload_result['success']:
return ballot_upload_result
ballots_dir: str = drive_info['result']['ballots_dir']
ballot_files = os.listdir(ballots_dir)
ballot_upload_id: str = ballot_upload_result['result']
ballot_num = 1
duplicate_count = 0
ballot_count = len(ballot_files)
for ballot_file in ballot_files:
self._log.debug('uploading ballot ' + ballot_file)
eel.update_upload_status(f'Uploading ballot {ballot_num}/{ballot_count}')
ballot_file_path = os.path.join(ballots_dir, ballot_file)
with open(ballot_file_path, 'r', encoding='utf-8') as ballot_file:
ballot_contents = ballot_file.read()
result = self.upload_ballot(ballot_upload_id, election_id, ballot_file, ballot_contents)
if not result['success']:
return result
if result['result']['is_duplicate']:
duplicate_count += 1
ballot_num += 1
return eel_success({'ballot_count': ballot_count, 'duplicate_count': duplicate_count})
except Exception as e:
return self.handle_error(e)
|
electionguard-python
|
positive
|
def cmdDelete(p_apiKey, p_orgs, p_admin):
if p_orgs is None:
return
for org in p_orgs:
<DeepExtract>
endpoint = '/organizations/%s/admins' % org['id']
(success, errors, headers, response) = merakiRequest(p_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, orgAdmins) = (success, errors, headers, response)
</DeepExtract>
<DeepExtract>
if not orgAdmins is None:
for admin in orgAdmins:
if admin['email'] == p_admin:
adminId = admin['id']
adminId = None
</DeepExtract>
if adminId is None:
<DeepExtract>
logString = '%s -- %s' % (datetime.datetime.now(), 'Skipping org "%s". Admin "%s" not found"' % (org['name'], p_admin))
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
else:
<DeepExtract>
endpoint = '/organizations/%s/admins/%s' % (org['id'], adminId)
(success, errors, headers, response) = merakiRequest(p_apiKey, 'DELETE', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, response) = (success, errors, headers, response)
</DeepExtract>
if success:
<DeepExtract>
logString = '%s -- %s' % (datetime.datetime.now(), 'Operation successful')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
else:
<DeepExtract>
logString = '%s -- %s' % (datetime.datetime.now(), 'Operation failed')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
</DeepExtract>
|
def cmdDelete(p_apiKey, p_orgs, p_admin):
if p_orgs is None:
return
for org in p_orgs:
endpoint = '/organizations/%s/admins' % org['id']
(success, errors, headers, response) = merakiRequest(p_apiKey, 'GET', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, orgAdmins) = (success, errors, headers, response)
if not orgAdmins is None:
for admin in orgAdmins:
if admin['email'] == p_admin:
adminId = admin['id']
adminId = None
if adminId is None:
logString = '%s -- %s' % (datetime.datetime.now(), 'Skipping org "%s". Admin "%s" not found"' % (org['name'], p_admin))
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
else:
endpoint = '/organizations/%s/admins/%s' % (org['id'], adminId)
(success, errors, headers, response) = merakiRequest(p_apiKey, 'DELETE', endpoint, p_verbose=FLAG_REQUEST_VERBOSE)
(success, errors, headers, response) = (success, errors, headers, response)
if success:
logString = '%s -- %s' % (datetime.datetime.now(), 'Operation successful')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
else:
logString = '%s -- %s' % (datetime.datetime.now(), 'Operation failed')
print(logString)
if not filePath is None:
try:
with open(filePath, 'a') as logFile:
logFile.write('%s\n' % logString)
except:
log('ERROR: Unable to append to log file')
|
automation-scripts
|
positive
|
def prompt_search_tags(prompt_text):
"""
Do an interactive prompt search in the Bibmanager database by
the given keywords, with auto-complete and auto-suggest only
offering non-None values of the given field.
Only one keyword must be set in the prompt.
A bottom toolbar dynamically shows additional info.
Parameters
----------
prompt_text: String
Text to display when launching the prompt.
Returns
-------
kw_input: List of strings
List of the parsed input (same order as keywords).
Items are None for the keywords not defined.
"""
<DeepExtract>
if bm_database is None:
bm_database = u.BM_DATABASE()
try:
with open(bm_database, 'rb') as handle:
bibs = pickle.load(handle)
except:
bibs = []
bibs = bibs
</DeepExtract>
bibkeys = [bib.key for bib in bibs]
bibcodes = [bib.bibcode for bib in bibs if bib.bibcode is not None]
tags = sorted(set(itertools.chain(*[bib.tags for bib in bibs if bib.tags is not None])))
entries = bibkeys + bibcodes
key_words = {'': entries, 'tags:': tags}
completer = u.LastKeyCompleter(key_words)
suggester = u.LastKeySuggestCompleter()
validator = u.AlwaysPassValidator(bibs, toolbar_text="(Press 'tab' for autocomplete)")
session = prompt_toolkit.PromptSession(history=FileHistory(u.BM_HISTORY_TAGS()))
inputs = session.prompt(prompt_text, auto_suggest=suggester, completer=completer, complete_while_typing=False, validator=validator, validate_while_typing=True, bottom_toolbar=validator.bottom_toolbar)
text = inputs.replace(' tags:', ' tags: ')
if text.startswith('tags:'):
text = 'tags: ' + text[5:]
input_strings = text.split()
if 'tags:' not in input_strings:
tag_index = len(input_strings)
else:
tag_index = input_strings.index('tags:')
entries = input_strings[0:tag_index]
tags = input_strings[tag_index + 1:]
keys = [find(bibcode=entry).key if entry in bibcodes else entry for entry in entries]
keys = [key for key in keys if key in bibkeys]
return (keys, tags)
|
def prompt_search_tags(prompt_text):
"""
Do an interactive prompt search in the Bibmanager database by
the given keywords, with auto-complete and auto-suggest only
offering non-None values of the given field.
Only one keyword must be set in the prompt.
A bottom toolbar dynamically shows additional info.
Parameters
----------
prompt_text: String
Text to display when launching the prompt.
Returns
-------
kw_input: List of strings
List of the parsed input (same order as keywords).
Items are None for the keywords not defined.
"""
if bm_database is None:
bm_database = u.BM_DATABASE()
try:
with open(bm_database, 'rb') as handle:
bibs = pickle.load(handle)
except:
bibs = []
bibs = bibs
bibkeys = [bib.key for bib in bibs]
bibcodes = [bib.bibcode for bib in bibs if bib.bibcode is not None]
tags = sorted(set(itertools.chain(*[bib.tags for bib in bibs if bib.tags is not None])))
entries = bibkeys + bibcodes
key_words = {'': entries, 'tags:': tags}
completer = u.LastKeyCompleter(key_words)
suggester = u.LastKeySuggestCompleter()
validator = u.AlwaysPassValidator(bibs, toolbar_text="(Press 'tab' for autocomplete)")
session = prompt_toolkit.PromptSession(history=FileHistory(u.BM_HISTORY_TAGS()))
inputs = session.prompt(prompt_text, auto_suggest=suggester, completer=completer, complete_while_typing=False, validator=validator, validate_while_typing=True, bottom_toolbar=validator.bottom_toolbar)
text = inputs.replace(' tags:', ' tags: ')
if text.startswith('tags:'):
text = 'tags: ' + text[5:]
input_strings = text.split()
if 'tags:' not in input_strings:
tag_index = len(input_strings)
else:
tag_index = input_strings.index('tags:')
entries = input_strings[0:tag_index]
tags = input_strings[tag_index + 1:]
keys = [find(bibcode=entry).key if entry in bibcodes else entry for entry in entries]
keys = [key for key in keys if key in bibkeys]
return (keys, tags)
|
bibmanager
|
positive
|
def test_middle_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 10}))
<DeepExtract>
queryset = list(self.pagination.paginate_queryset(self.queryset, request))
</DeepExtract>
<DeepExtract>
response = self.pagination.get_paginated_response(queryset)
content = response.data
</DeepExtract>
<DeepExtract>
context = self.pagination.get_html_context()
</DeepExtract>
assert queryset == [11, 12, 13, 14, 15]
assert content == [11, 12, 13, 14, 15]
assert context == {'previous_url': 'http://testserver/?limit=5&offset=5', 'next_url': 'http://testserver/?limit=5&offset=15', 'page_links': [PageLink('http://testserver/?limit=5', 1, False, False), PageLink('http://testserver/?limit=5&offset=5', 2, False, False), PageLink('http://testserver/?limit=5&offset=10', 3, True, False), PageLink('http://testserver/?limit=5&offset=15', 4, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=95', 20, False, False)]}
<DeepExtract>
response = self.pagination.get_paginated_response(queryset)
content_range_data = response['Content-Range']
</DeepExtract>
assert content_range_data == 'items 10-14/100'
|
def test_middle_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 10}))
queryset = list(self.pagination.paginate_queryset(self.queryset, request))
response = self.pagination.get_paginated_response(queryset)
content = response.data
context = self.pagination.get_html_context()
assert queryset == [11, 12, 13, 14, 15]
assert content == [11, 12, 13, 14, 15]
assert context == {'previous_url': 'http://testserver/?limit=5&offset=5', 'next_url': 'http://testserver/?limit=5&offset=15', 'page_links': [PageLink('http://testserver/?limit=5', 1, False, False), PageLink('http://testserver/?limit=5&offset=5', 2, False, False), PageLink('http://testserver/?limit=5&offset=10', 3, True, False), PageLink('http://testserver/?limit=5&offset=15', 4, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=95', 20, False, False)]}
response = self.pagination.get_paginated_response(queryset)
content_range_data = response['Content-Range']
assert content_range_data == 'items 10-14/100'
|
django-rql
|
positive
|
def dfs_deserialize():
val = next(result)
if val == 'NONE':
return None
else:
node = TreeNode(int(val))
<DeepExtract>
val = next(result)
if val == 'NONE':
node.left = None
else:
node = TreeNode(int(val))
node.left = dfs_deserialize()
node.right = dfs_deserialize()
node.left = node
</DeepExtract>
<DeepExtract>
val = next(result)
if val == 'NONE':
node.right = None
else:
node = TreeNode(int(val))
node.left = dfs_deserialize()
node.right = dfs_deserialize()
node.right = node
</DeepExtract>
return node
|
def dfs_deserialize():
val = next(result)
if val == 'NONE':
return None
else:
node = TreeNode(int(val))
val = next(result)
if val == 'NONE':
node.left = None
else:
node = TreeNode(int(val))
node.left = dfs_deserialize()
node.right = dfs_deserialize()
node.left = node
val = next(result)
if val == 'NONE':
node.right = None
else:
node = TreeNode(int(val))
node.left = dfs_deserialize()
node.right = dfs_deserialize()
node.right = node
return node
|
Competitive_Programming
|
positive
|
def getCMY(self):
if self._cmy is not None:
return self._cmy
<DeepExtract>
(r, g, b, a) = self.getRgb()
(r, g, b) = (r / 255.0, g / 255.0, b / 255.0)
</DeepExtract>
return (1.0 - r, 1.0 - g, 1.0 - b)
|
def getCMY(self):
if self._cmy is not None:
return self._cmy
(r, g, b, a) = self.getRgb()
(r, g, b) = (r / 255.0, g / 255.0, b / 255.0)
return (1.0 - r, 1.0 - g, 1.0 - b)
|
color-palette
|
positive
|
def __init__(self, site_path: Union[str, Path, None]=None, settings: Optional[Settings]=None) -> None:
<DeepExtract>
site_path = site_path or os.environ.get('BALSAM_SITE_PATH') or SiteConfig.search_site_dir()
if site_path is None:
raise ValueError('Initialize SiteConfig with a `site_path` or set env BALSAM_SITE_PATH to a Balsam site directory containing a settings.py file.')
site_path = Path(site_path).resolve()
if not site_path.is_dir():
raise FileNotFoundError(f'BALSAM_SITE_PATH {site_path} must point to an existing Balsam site directory')
try:
site_id = int(site_path.joinpath('.balsam-site').read_text())
except FileNotFoundError:
raise FileNotFoundError(f'BALSAM_SITE_PATH {site_path} is not a valid Balsam site directory (does not contain a .balsam-site file)')
os.environ['BALSAM_SITE_PATH'] = str(site_path)
(site_path, site_id) = (site_path, site_id)
</DeepExtract>
self.site_path: Path = site_path
self.site_id: int = site_id
self.client = ClientSettings.load_from_file().build_client()
if settings is not None:
if not isinstance(settings, Settings):
raise ValueError("If you're passing the settings kwarg, it must be an instance of balsam.config.Settings. Otherwise, leave settings=None to auto-load the settings stored at BALSAM_SITE_PATH.")
self.settings = settings
return
yaml_settings = self.site_path.joinpath('settings.yml')
if not yaml_settings.is_file():
raise FileNotFoundError(f'{site_path} must contain a settings.yml')
try:
self.settings = Settings.load(yaml_settings)
except ValidationError as exc:
raise InvalidSettings(f'{yaml_settings} is invalid:\n{exc}')
|
def __init__(self, site_path: Union[str, Path, None]=None, settings: Optional[Settings]=None) -> None:
site_path = site_path or os.environ.get('BALSAM_SITE_PATH') or SiteConfig.search_site_dir()
if site_path is None:
raise ValueError('Initialize SiteConfig with a `site_path` or set env BALSAM_SITE_PATH to a Balsam site directory containing a settings.py file.')
site_path = Path(site_path).resolve()
if not site_path.is_dir():
raise FileNotFoundError(f'BALSAM_SITE_PATH {site_path} must point to an existing Balsam site directory')
try:
site_id = int(site_path.joinpath('.balsam-site').read_text())
except FileNotFoundError:
raise FileNotFoundError(f'BALSAM_SITE_PATH {site_path} is not a valid Balsam site directory (does not contain a .balsam-site file)')
os.environ['BALSAM_SITE_PATH'] = str(site_path)
(site_path, site_id) = (site_path, site_id)
self.site_path: Path = site_path
self.site_id: int = site_id
self.client = ClientSettings.load_from_file().build_client()
if settings is not None:
if not isinstance(settings, Settings):
raise ValueError("If you're passing the settings kwarg, it must be an instance of balsam.config.Settings. Otherwise, leave settings=None to auto-load the settings stored at BALSAM_SITE_PATH.")
self.settings = settings
return
yaml_settings = self.site_path.joinpath('settings.yml')
if not yaml_settings.is_file():
raise FileNotFoundError(f'{site_path} must contain a settings.yml')
try:
self.settings = Settings.load(yaml_settings)
except ValidationError as exc:
raise InvalidSettings(f'{yaml_settings} is invalid:\n{exc}')
|
balsam
|
positive
|
def _sethex(self, hexstring):
"""Reset the bitstring to have the value given in hexstring."""
<DeepExtract>
hexstring = ''.join(hexstring.split()).lower()
hexstring = hexstring
</DeepExtract>
hexstring = hexstring.replace('0x', '')
length = len(hexstring)
if length % 2:
hexstring += '0'
try:
try:
data = bytearray.fromhex(hexstring)
except TypeError:
data = bytearray.fromhex(unicode(hexstring))
except ValueError:
raise CreationError('Invalid symbol in hex initialiser.')
<DeepExtract>
self._datastore = ByteStore(data[:], length * 4, 0)
assert self._assertsanity()
</DeepExtract>
|
def _sethex(self, hexstring):
"""Reset the bitstring to have the value given in hexstring."""
hexstring = ''.join(hexstring.split()).lower()
hexstring = hexstring
hexstring = hexstring.replace('0x', '')
length = len(hexstring)
if length % 2:
hexstring += '0'
try:
try:
data = bytearray.fromhex(hexstring)
except TypeError:
data = bytearray.fromhex(unicode(hexstring))
except ValueError:
raise CreationError('Invalid symbol in hex initialiser.')
self._datastore = ByteStore(data[:], length * 4, 0)
assert self._assertsanity()
|
Arduino-Telescope-Control
|
positive
|
def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0, truncation_strategy='longest_first', return_tensors=None):
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates
sequences if overflowing while taking into account the special tokens and manages a window stride for
overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful for edge effect removal when using sequential
list of inputs.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
Return:
A Dictionary of shape::
{
input_ids: list[int],
overflowing_tokens: list[int] if a ``max_length`` is specified, else None
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True``
}
With the fields:
``input_ids``: list of tokens to be fed to a model
``overflowing_tokens``: list of overflowing tokens if a max length is specified.
``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
<DeepExtract>
if total_len - max_length <= 0:
(ids, pair_ids, overflowing_tokens) = (ids, pair_ids, [])
if truncation_strategy == 'longest_first':
overflowing_tokens = []
for _ in range(total_len - max_length):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == 'only_first':
assert len(ids) > total_len - max_length
window_len = min(len(ids), stride + total_len - max_length)
overflowing_tokens = ids[-window_len:]
ids = ids[:-total_len - max_length]
elif truncation_strategy == 'only_second':
assert pair_ids is not None and len(pair_ids) > total_len - max_length
window_len = min(len(pair_ids), stride + total_len - max_length)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-total_len - max_length]
elif truncation_strategy == 'do_not_truncate':
raise ValueError('Input sequence are too long for max_length. Please select a truncation strategy.')
else:
raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")
(ids, pair_ids, overflowing_tokens) = (ids, pair_ids, overflowing_tokens)
</DeepExtract>
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
<DeepExtract>
logger.warning('This tokenizer does not make use of special tokens. Input is returned with no modification.')
if pair_ids is None:
sequence = ids
sequence = ids + pair_ids
</DeepExtract>
<DeepExtract>
logger.warning('This tokenizer does not make use of special tokens.')
if pair_ids is None:
token_type_ids = len(ids) * [0]
token_type_ids = [0] * len(ids) + [1] * len(pair_ids)
</DeepExtract>
<DeepExtract>
encoded_inputs['special_tokens_mask'] = [0] * ((len(pair_ids) if pair_ids else 0) + len(ids))
</DeepExtract>
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
if return_tensors == 'tf' and is_tf_available():
sequence = tf.constant([sequence])
token_type_ids = tf.constant([token_type_ids])
elif return_tensors == 'pt' and is_torch_available():
sequence = torch.tensor([sequence])
token_type_ids = torch.tensor([token_type_ids])
elif return_tensors is not None:
logger.warning('Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.'.format(return_tensors))
encoded_inputs['input_ids'] = sequence
encoded_inputs['token_type_ids'] = token_type_ids
if max_length and len(encoded_inputs['input_ids']) > max_length:
encoded_inputs['input_ids'] = encoded_inputs['input_ids'][:max_length]
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'][:max_length]
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'][:max_length]
return encoded_inputs
|
def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0, truncation_strategy='longest_first', return_tensors=None):
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model.
It adds special tokens, truncates
sequences if overflowing while taking into account the special tokens and manages a window stride for
overflowing tokens
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
stride: window stride for overflowing tokens. Can be useful for edge effect removal when using sequential
list of inputs.
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
starting from the longest one at each token (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
or PyTorch torch.Tensor instead of a list of python integers.
Return:
A Dictionary of shape::
{
input_ids: list[int],
overflowing_tokens: list[int] if a ``max_length`` is specified, else None
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True``
}
With the fields:
``input_ids``: list of tokens to be fed to a model
``overflowing_tokens``: list of overflowing tokens if a max length is specified.
``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0)
if max_length and total_len > max_length:
if total_len - max_length <= 0:
(ids, pair_ids, overflowing_tokens) = (ids, pair_ids, [])
if truncation_strategy == 'longest_first':
overflowing_tokens = []
for _ in range(total_len - max_length):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == 'only_first':
assert len(ids) > total_len - max_length
window_len = min(len(ids), stride + total_len - max_length)
overflowing_tokens = ids[-window_len:]
ids = ids[:-total_len - max_length]
elif truncation_strategy == 'only_second':
assert pair_ids is not None and len(pair_ids) > total_len - max_length
window_len = min(len(pair_ids), stride + total_len - max_length)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-total_len - max_length]
elif truncation_strategy == 'do_not_truncate':
raise ValueError('Input sequence are too long for max_length. Please select a truncation strategy.')
else:
raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")
(ids, pair_ids, overflowing_tokens) = (ids, pair_ids, overflowing_tokens)
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['num_truncated_tokens'] = total_len - max_length
if add_special_tokens:
logger.warning('This tokenizer does not make use of special tokens. Input is returned with no modification.')
if pair_ids is None:
sequence = ids
sequence = ids + pair_ids
logger.warning('This tokenizer does not make use of special tokens.')
if pair_ids is None:
token_type_ids = len(ids) * [0]
token_type_ids = [0] * len(ids) + [1] * len(pair_ids)
encoded_inputs['special_tokens_mask'] = [0] * ((len(pair_ids) if pair_ids else 0) + len(ids))
else:
sequence = ids + pair_ids if pair else ids
token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else [])
if return_tensors == 'tf' and is_tf_available():
sequence = tf.constant([sequence])
token_type_ids = tf.constant([token_type_ids])
elif return_tensors == 'pt' and is_torch_available():
sequence = torch.tensor([sequence])
token_type_ids = torch.tensor([token_type_ids])
elif return_tensors is not None:
logger.warning('Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.'.format(return_tensors))
encoded_inputs['input_ids'] = sequence
encoded_inputs['token_type_ids'] = token_type_ids
if max_length and len(encoded_inputs['input_ids']) > max_length:
encoded_inputs['input_ids'] = encoded_inputs['input_ids'][:max_length]
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'][:max_length]
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'][:max_length]
return encoded_inputs
|
BERT-SDA
|
positive
|
def query_annotations():
<DeepExtract>
request = requests.post(config.graphql_url, json={'query': '\n query {\n allPapers {\n edges {\n node {\n arxivId\n goldTags\n tableSet {\n edges {\n node {\n name\n datasetText\n notes\n goldTags\n matrixGoldTags\n cellsSotaRecords\n parser\n }\n }\n }\n }\n }\n }\n }\n '})
if request.status_code == 200:
raw = request.json()
else:
raise Exception(f'Query error: status code {request.status_code}')
</DeepExtract>
return _load_annotated_papers(raw)
|
def query_annotations():
request = requests.post(config.graphql_url, json={'query': '\n query {\n allPapers {\n edges {\n node {\n arxivId\n goldTags\n tableSet {\n edges {\n node {\n name\n datasetText\n notes\n goldTags\n matrixGoldTags\n cellsSotaRecords\n parser\n }\n }\n }\n }\n }\n }\n }\n '})
if request.status_code == 200:
raw = request.json()
else:
raise Exception(f'Query error: status code {request.status_code}')
return _load_annotated_papers(raw)
|
axcell
|
positive
|
def _consolidate_linear_constraints(params_vec, linear_constraints, constr_info, param_names):
"""Consolidate linear constraints.
Consolidation entails the following steps:
- Plugging fixes and equality constraints into the linear constraints
- Collect weights of those constraints that overlap into weight DataFrames
- Collect corresponding right hand sides (bounds or values) in DataFrames
- Express box constraints of parameters involved in linear constraints as
additional linear constraints.
- Rescale the weights for easier detection of linear dependence
- Drop redundant constraints
- Check compatibility of constraints
- Construct a list of consolidated constraint dictionaries that contain
all matrices needed for the kernel transformations.
Args:
params_vec (np.ndarray): 1d numpy array wtih parameters
linear_constraints (list): Linear constraints that already have processed
weights and selector fields.
constr_info (dict): Dict with information about constraints.
param_names (list): Parameter names. Used for error messages.
Returns:
list: Processed and consolidated linear constraints.
"""
<DeepExtract>
(all_weights, all_values, all_lbs, all_ubs) = ([], [], [], [])
for constr in linear_constraints:
all_weights.append(constr['weights'])
all_values.append(constr.get('value', np.nan))
all_lbs.append(constr.get('lower_bound', -np.inf))
all_ubs.append(constr.get('upper_bound', np.inf))
weights = pd.concat(all_weights, axis=1).T.reset_index()
weights = weights.reindex(columns=np.arange(len(params_vec))).fillna(0)
values = pd.Series(all_values, name='value')
lbs = pd.Series(all_lbs, name='lower_bound')
ubs = pd.Series(all_ubs, name='upper_bound')
rhs = pd.concat([values, lbs, ubs], axis=1)
(weights, right_hand_side) = (weights, rhs)
</DeepExtract>
<DeepExtract>
w = weights.T
plugged_iloc = pd.Series(constr_info['post_replacements'])
plugged_iloc = plugged_iloc.where(plugged_iloc >= 0, np.arange(len(plugged_iloc)))
w['plugged_iloc'] = plugged_iloc
plugged_weights = w.groupby('plugged_iloc').sum()
plugged_weights = plugged_weights.reindex(w.index).fillna(0).T
weights = plugged_weights
</DeepExtract>
<DeepExtract>
ilocs = np.arange(len(constr_info['fixed_values']))
fixed_ilocs = ilocs[constr_info['is_fixed_to_value']].tolist()
new_rhs = right_hand_side.copy()
new_weights = weights.copy()
if len(fixed_ilocs) > 0:
fixed_values = constr_info['fixed_values'][fixed_ilocs]
fixed_contribution = weights[fixed_ilocs] @ fixed_values
for column in ['lower_bound', 'upper_bound', 'value']:
new_rhs[column] = new_rhs[column] - fixed_contribution
for i in fixed_ilocs:
new_weights[i] = 0
(weights, right_hand_side) = (new_weights, new_rhs)
</DeepExtract>
involved_parameters = [set(w[w != 0].index) for (_, w) in weights.iterrows()]
<DeepExtract>
bundles = []
while len(involved_parameters) > 0:
new_candidates = _unite_first_with_all_intersecting_elements(involved_parameters)
if len(involved_parameters) == len(new_candidates):
bundles.append(sorted(new_candidates[0]))
involved_parameters = involved_parameters[1:]
else:
involved_parameters = new_candidates
bundled_indices = bundles
</DeepExtract>
pc = []
for involved_parameters in bundled_indices:
w = weights[involved_parameters][(weights[involved_parameters] != 0).any(axis=1)].copy(deep=True)
rhs = right_hand_side.loc[w.index].copy(deep=True)
<DeepExtract>
additional_pc = []
for i in w.columns:
new = {}
if np.isfinite(constr_info['lower_bounds'][i]):
new['lower_bound'] = constr_info['lower_bounds'][i]
if np.isfinite(constr_info['upper_bounds'][i]):
new['upper_bound'] = constr_info['upper_bounds'][i]
if new != {}:
new['weights'] = pd.Series([1], name='w', index=[i])
additional_pc.append(new)
if len(additional_pc) > 0:
(new_weights, new_rhs) = _transform_linear_constraints_to_pandas_objects(additional_pc, len(constr_info['lower_bounds']))
new_weights = new_weights[w.columns]
extended_weights = pd.concat([w, new_weights]).reset_index(drop=True)
extended_rhs = pd.concat([rhs, new_rhs]).reset_index(drop=True)
else:
(extended_weights, extended_rhs) = (w, rhs)
(w, rhs) = (extended_weights, extended_rhs)
</DeepExtract>
<DeepExtract>
first_nonzero = w.replace(0, np.nan).bfill(axis=1).iloc[:, 0]
scaling_factor = 1 / first_nonzero.to_numpy().reshape(-1, 1)
new_weights = scaling_factor * w
scaled_rhs = scaling_factor * rhs
new_rhs = scaled_rhs.copy()
new_rhs['lower_bound'] = scaled_rhs['lower_bound'].where(scaling_factor.flatten() > 0, scaled_rhs['upper_bound'])
new_rhs['upper_bound'] = scaled_rhs['upper_bound'].where(scaling_factor.flatten() > 0, scaled_rhs['lower_bound'])
(w, rhs) = (new_weights, new_rhs)
</DeepExtract>
<DeepExtract>
w['dupl_group'] = w.groupby(list(w.columns)).grouper.group_info[0]
rhs['dupl_group'] = w['dupl_group']
w.set_index('dupl_group', inplace=True)
new_weights = w.drop_duplicates()
def _consolidate_fix(x):
vc = x.value_counts(dropna=True)
if len(vc) == 0:
(w, rhs) = np.nan
elif len(vc) == 1:
(w, rhs) = vc.index[0]
else:
raise ValueError
ub = rhs.groupby('dupl_group')['upper_bound'].min()
lb = rhs.groupby('dupl_group')['lower_bound'].max()
fix = rhs.groupby('dupl_group')['value'].apply(_consolidate_fix)
ub = ub.where(fix.isnull(), np.inf)
lb = lb.where(fix.isnull(), -np.inf)
new_rhs = pd.concat([lb, ub, fix], axis=1, names=['lower_bound', 'upper_bound', 'value'])
new_rhs = new_rhs.reindex(new_weights.index)
(w, rhs) = (new_weights, new_rhs)
</DeepExtract>
<DeepExtract>
(n_constraints, n_params) = w.shape
msg_too_many = 'Too many linear constraints. There can be at most as many linear constraintsas involved parameters with non-zero weights.\n'
msg_rank = 'The weights for linear constraints must be linearly independent.\n'
msg_general = 'The error occurred for constraints on the following parameters:\n{}\n with weighting matrix:\n{}\nIt is possible that you did not specify those constraints as linear constraints but as bounds, fixes, increasing or decreasing constraints.'
relevant_names = [param_names[i] for i in w.columns]
if n_constraints > n_params:
raise InvalidConstraintError(msg_too_many + msg_general.format(relevant_names, w))
if np.linalg.matrix_rank(w) < n_constraints:
raise InvalidConstraintError(msg_rank + msg_general.format(relevant_names, w))
</DeepExtract>
<DeepExtract>
(n_constraints, n_params) = w.shape
identity = np.eye(n_params)
i = 0
filled_weights = w
while len(filled_weights) < n_params:
candidate = np.vstack([identity[i], filled_weights])
if np.linalg.matrix_rank(candidate) == len(candidate):
filled_weights = candidate
i += 1
k = n_params - n_constraints
filled_weights[:k] = filled_weights[:k][::-1]
to_internal = filled_weights
from_internal = np.linalg.inv(to_internal)
(to_internal, from_internal) = (to_internal, from_internal)
</DeepExtract>
constr = {'index': list(w.columns), 'type': 'linear', 'to_internal': to_internal, 'from_internal': from_internal, 'right_hand_side': rhs}
pc.append(constr)
return pc
|
def _consolidate_linear_constraints(params_vec, linear_constraints, constr_info, param_names):
"""Consolidate linear constraints.
Consolidation entails the following steps:
- Plugging fixes and equality constraints into the linear constraints
- Collect weights of those constraints that overlap into weight DataFrames
- Collect corresponding right hand sides (bounds or values) in DataFrames
- Express box constraints of parameters involved in linear constraints as
additional linear constraints.
- Rescale the weights for easier detection of linear dependence
- Drop redundant constraints
- Check compatibility of constraints
- Construct a list of consolidated constraint dictionaries that contain
all matrices needed for the kernel transformations.
Args:
params_vec (np.ndarray): 1d numpy array wtih parameters
linear_constraints (list): Linear constraints that already have processed
weights and selector fields.
constr_info (dict): Dict with information about constraints.
param_names (list): Parameter names. Used for error messages.
Returns:
list: Processed and consolidated linear constraints.
"""
(all_weights, all_values, all_lbs, all_ubs) = ([], [], [], [])
for constr in linear_constraints:
all_weights.append(constr['weights'])
all_values.append(constr.get('value', np.nan))
all_lbs.append(constr.get('lower_bound', -np.inf))
all_ubs.append(constr.get('upper_bound', np.inf))
weights = pd.concat(all_weights, axis=1).T.reset_index()
weights = weights.reindex(columns=np.arange(len(params_vec))).fillna(0)
values = pd.Series(all_values, name='value')
lbs = pd.Series(all_lbs, name='lower_bound')
ubs = pd.Series(all_ubs, name='upper_bound')
rhs = pd.concat([values, lbs, ubs], axis=1)
(weights, right_hand_side) = (weights, rhs)
w = weights.T
plugged_iloc = pd.Series(constr_info['post_replacements'])
plugged_iloc = plugged_iloc.where(plugged_iloc >= 0, np.arange(len(plugged_iloc)))
w['plugged_iloc'] = plugged_iloc
plugged_weights = w.groupby('plugged_iloc').sum()
plugged_weights = plugged_weights.reindex(w.index).fillna(0).T
weights = plugged_weights
ilocs = np.arange(len(constr_info['fixed_values']))
fixed_ilocs = ilocs[constr_info['is_fixed_to_value']].tolist()
new_rhs = right_hand_side.copy()
new_weights = weights.copy()
if len(fixed_ilocs) > 0:
fixed_values = constr_info['fixed_values'][fixed_ilocs]
fixed_contribution = weights[fixed_ilocs] @ fixed_values
for column in ['lower_bound', 'upper_bound', 'value']:
new_rhs[column] = new_rhs[column] - fixed_contribution
for i in fixed_ilocs:
new_weights[i] = 0
(weights, right_hand_side) = (new_weights, new_rhs)
involved_parameters = [set(w[w != 0].index) for (_, w) in weights.iterrows()]
bundles = []
while len(involved_parameters) > 0:
new_candidates = _unite_first_with_all_intersecting_elements(involved_parameters)
if len(involved_parameters) == len(new_candidates):
bundles.append(sorted(new_candidates[0]))
involved_parameters = involved_parameters[1:]
else:
involved_parameters = new_candidates
bundled_indices = bundles
pc = []
for involved_parameters in bundled_indices:
w = weights[involved_parameters][(weights[involved_parameters] != 0).any(axis=1)].copy(deep=True)
rhs = right_hand_side.loc[w.index].copy(deep=True)
additional_pc = []
for i in w.columns:
new = {}
if np.isfinite(constr_info['lower_bounds'][i]):
new['lower_bound'] = constr_info['lower_bounds'][i]
if np.isfinite(constr_info['upper_bounds'][i]):
new['upper_bound'] = constr_info['upper_bounds'][i]
if new != {}:
new['weights'] = pd.Series([1], name='w', index=[i])
additional_pc.append(new)
if len(additional_pc) > 0:
(new_weights, new_rhs) = _transform_linear_constraints_to_pandas_objects(additional_pc, len(constr_info['lower_bounds']))
new_weights = new_weights[w.columns]
extended_weights = pd.concat([w, new_weights]).reset_index(drop=True)
extended_rhs = pd.concat([rhs, new_rhs]).reset_index(drop=True)
else:
(extended_weights, extended_rhs) = (w, rhs)
(w, rhs) = (extended_weights, extended_rhs)
first_nonzero = w.replace(0, np.nan).bfill(axis=1).iloc[:, 0]
scaling_factor = 1 / first_nonzero.to_numpy().reshape(-1, 1)
new_weights = scaling_factor * w
scaled_rhs = scaling_factor * rhs
new_rhs = scaled_rhs.copy()
new_rhs['lower_bound'] = scaled_rhs['lower_bound'].where(scaling_factor.flatten() > 0, scaled_rhs['upper_bound'])
new_rhs['upper_bound'] = scaled_rhs['upper_bound'].where(scaling_factor.flatten() > 0, scaled_rhs['lower_bound'])
(w, rhs) = (new_weights, new_rhs)
w['dupl_group'] = w.groupby(list(w.columns)).grouper.group_info[0]
rhs['dupl_group'] = w['dupl_group']
w.set_index('dupl_group', inplace=True)
new_weights = w.drop_duplicates()
def _consolidate_fix(x):
vc = x.value_counts(dropna=True)
if len(vc) == 0:
(w, rhs) = np.nan
elif len(vc) == 1:
(w, rhs) = vc.index[0]
else:
raise ValueError
ub = rhs.groupby('dupl_group')['upper_bound'].min()
lb = rhs.groupby('dupl_group')['lower_bound'].max()
fix = rhs.groupby('dupl_group')['value'].apply(_consolidate_fix)
ub = ub.where(fix.isnull(), np.inf)
lb = lb.where(fix.isnull(), -np.inf)
new_rhs = pd.concat([lb, ub, fix], axis=1, names=['lower_bound', 'upper_bound', 'value'])
new_rhs = new_rhs.reindex(new_weights.index)
(w, rhs) = (new_weights, new_rhs)
(n_constraints, n_params) = w.shape
msg_too_many = 'Too many linear constraints. There can be at most as many linear constraintsas involved parameters with non-zero weights.\n'
msg_rank = 'The weights for linear constraints must be linearly independent.\n'
msg_general = 'The error occurred for constraints on the following parameters:\n{}\n with weighting matrix:\n{}\nIt is possible that you did not specify those constraints as linear constraints but as bounds, fixes, increasing or decreasing constraints.'
relevant_names = [param_names[i] for i in w.columns]
if n_constraints > n_params:
raise InvalidConstraintError(msg_too_many + msg_general.format(relevant_names, w))
if np.linalg.matrix_rank(w) < n_constraints:
raise InvalidConstraintError(msg_rank + msg_general.format(relevant_names, w))
(n_constraints, n_params) = w.shape
identity = np.eye(n_params)
i = 0
filled_weights = w
while len(filled_weights) < n_params:
candidate = np.vstack([identity[i], filled_weights])
if np.linalg.matrix_rank(candidate) == len(candidate):
filled_weights = candidate
i += 1
k = n_params - n_constraints
filled_weights[:k] = filled_weights[:k][::-1]
to_internal = filled_weights
from_internal = np.linalg.inv(to_internal)
(to_internal, from_internal) = (to_internal, from_internal)
constr = {'index': list(w.columns), 'type': 'linear', 'to_internal': to_internal, 'from_internal': from_internal, 'right_hand_side': rhs}
pc.append(constr)
return pc
|
estimagic
|
positive
|
@retry
def get_posts_volume(self, first_date: datetime, second_date: datetime) -> int:
<DeepExtract>
param = Payload().creation_date(created_after=first_date, created_before=second_date).focuses(self.focus_ids).pagination(start=0, limit=self.api_request_limit)
param = param
</DeepExtract>
posts_volume = self.project.get_all_publications(param).total
return posts_volume
|
@retry
def get_posts_volume(self, first_date: datetime, second_date: datetime) -> int:
param = Payload().creation_date(created_after=first_date, created_before=second_date).focuses(self.focus_ids).pagination(start=0, limit=self.api_request_limit)
param = param
posts_volume = self.project.get_all_publications(param).total
return posts_volume
|
artefactory-connectors-kit
|
positive
|
def version(self):
localctx = qasm3Parser.VersionContext(self, self._ctx, self.state)
<DeepExtract>
if hasattr(localctx, 'enterProgram'):
localctx.enterProgram(self)
</DeepExtract>
try:
self.enterOuterAlt(localctx, 1)
self.state = 131
self.match(qasm3Parser.OPENQASM)
self.state = 132
self.match(qasm3Parser.VersionSpecifier)
self.state = 133
self.match(qasm3Parser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
<DeepExtract>
if hasattr(listener, 'exitProgram'):
listener.exitProgram(self)
</DeepExtract>
return localctx
|
def version(self):
localctx = qasm3Parser.VersionContext(self, self._ctx, self.state)
if hasattr(localctx, 'enterProgram'):
localctx.enterProgram(self)
try:
self.enterOuterAlt(localctx, 1)
self.state = 131
self.match(qasm3Parser.OPENQASM)
self.state = 132
self.match(qasm3Parser.VersionSpecifier)
self.state = 133
self.match(qasm3Parser.SEMICOLON)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
if hasattr(listener, 'exitProgram'):
listener.exitProgram(self)
return localctx
|
amazon-braket-default-simulator-python
|
positive
|
def publish(*params):
<DeepExtract>
parser = ArgumentParser('Boutiques publisher', description='A publisher of Boutiques tools in Zenodo (http://zenodo.org). Requires a Zenodo access token, see http://developers.zenodo.org/#authentication.')
parser.add_argument('boutiques_descriptor', action='store', help='local path of the Boutiques descriptor to publish.')
parser.add_argument('--sandbox', action='store_true', help="publish to Zenodo's sandbox instead of production server. Recommended for tests.")
parser.add_argument('--zenodo-token', action='store', help='Zenodo API token to use for authentication. If not used, token will be read from configuration file or requested interactively.')
parser.add_argument('--no-int', '-y', action='store_true', help='disable interactive input.')
parser.add_argument('-v', '--verbose', action='store_true', help='print information messages.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-r', '--replace', action='store_true', help='Publish an updated version of an existing record. The descriptor must contain a DOI, which will be replaced with a new one.')
group.add_argument('--id', action='store', help="Zenodo ID of an existing record you wish to update with a new version, prefixed by 'zenodo.' (e.g. zenodo.123456).")
parser = parser
</DeepExtract>
results = parser.parse_args(params)
from boutiques.publisher import Publisher
publisher = Publisher(results.boutiques_descriptor, results.zenodo_token, results.verbose, results.sandbox, results.no_int, results.replace, results.id)
publisher.publish()
if hasattr(publisher, 'doi'):
return publisher.doi
|
def publish(*params):
parser = ArgumentParser('Boutiques publisher', description='A publisher of Boutiques tools in Zenodo (http://zenodo.org). Requires a Zenodo access token, see http://developers.zenodo.org/#authentication.')
parser.add_argument('boutiques_descriptor', action='store', help='local path of the Boutiques descriptor to publish.')
parser.add_argument('--sandbox', action='store_true', help="publish to Zenodo's sandbox instead of production server. Recommended for tests.")
parser.add_argument('--zenodo-token', action='store', help='Zenodo API token to use for authentication. If not used, token will be read from configuration file or requested interactively.')
parser.add_argument('--no-int', '-y', action='store_true', help='disable interactive input.')
parser.add_argument('-v', '--verbose', action='store_true', help='print information messages.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-r', '--replace', action='store_true', help='Publish an updated version of an existing record. The descriptor must contain a DOI, which will be replaced with a new one.')
group.add_argument('--id', action='store', help="Zenodo ID of an existing record you wish to update with a new version, prefixed by 'zenodo.' (e.g. zenodo.123456).")
parser = parser
results = parser.parse_args(params)
from boutiques.publisher import Publisher
publisher = Publisher(results.boutiques_descriptor, results.zenodo_token, results.verbose, results.sandbox, results.no_int, results.replace, results.id)
publisher.publish()
if hasattr(publisher, 'doi'):
return publisher.doi
|
boutiques
|
positive
|
def bar_plot(*args, **kwargs):
<DeepExtract>
warnings.warn("The 'argopy.plotters' has been replaced by 'argopy.plot'. After 0.1.13, importing 'plotters' will raise an error. You're seeing this message because you called this function through the argopy 'plotters' module.", category=DeprecationWarning, stacklevel=2)
</DeepExtract>
from .plot import bar_plot
return bar_plot(*args, **kwargs)
|
def bar_plot(*args, **kwargs):
warnings.warn("The 'argopy.plotters' has been replaced by 'argopy.plot'. After 0.1.13, importing 'plotters' will raise an error. You're seeing this message because you called this function through the argopy 'plotters' module.", category=DeprecationWarning, stacklevel=2)
from .plot import bar_plot
return bar_plot(*args, **kwargs)
|
argopy
|
positive
|
def compute_percepts_prob(self, world_list):
<DeepExtract>
percepts_value = []
for world in world_list:
percepts_value.append(world.get_perception_vector())
percepts_value = np.stack(percepts_value).astype(np.float)
percepts_value = percepts_value
</DeepExtract>
num_demo = float(len(world_list))
percepts_sum = percepts_value.sum(axis=0)
percepts_diff = num_demo / 2.0 - abs(num_demo / 2.0 - percepts_sum)
percepts_diff = percepts_diff ** 2
if percepts_diff.sum() == 0:
percepts_diff[:] += 1e-10
percepts_prob = percepts_diff / percepts_diff.sum()
return percepts_prob
|
def compute_percepts_prob(self, world_list):
percepts_value = []
for world in world_list:
percepts_value.append(world.get_perception_vector())
percepts_value = np.stack(percepts_value).astype(np.float)
percepts_value = percepts_value
num_demo = float(len(world_list))
percepts_sum = percepts_value.sum(axis=0)
percepts_diff = num_demo / 2.0 - abs(num_demo / 2.0 - percepts_sum)
percepts_diff = percepts_diff ** 2
if percepts_diff.sum() == 0:
percepts_diff[:] += 1e-10
percepts_prob = percepts_diff / percepts_diff.sum()
return percepts_prob
|
demo2program
|
positive
|
def verify(self, token, nonce=None, max_age=None, organization=None):
"""Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.
Args:
token (str): The JWT to verify.
nonce (str, optional): The nonce value sent during authentication.
max_age (int, optional): The max_age value sent during authentication.
organization (str, optional): The expected organization ID (org_id) claim value. This should be specified
when logging in to an organization.
Returns:
the decoded payload from the token
Raises:
TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,
the token signature is invalid or the token has a claim missing or with unexpected value.
"""
if not token or not isinstance(token, str):
raise TokenValidationError('ID token is required but missing.')
payload = self._sv.verify_signature(token)
<DeepExtract>
if 'iss' not in payload or not isinstance(payload['iss'], str):
raise TokenValidationError('Issuer (iss) claim must be a string present in the ID token')
if payload['iss'] != self.iss:
raise TokenValidationError('Issuer (iss) claim mismatch in the ID token; expected "{}", found "{}"'.format(self.iss, payload['iss']))
if 'sub' not in payload or not isinstance(payload['sub'], str):
raise TokenValidationError('Subject (sub) claim must be a string present in the ID token')
if 'aud' not in payload or not isinstance(payload['aud'], (str, list)):
raise TokenValidationError('Audience (aud) claim must be a string or array of strings present in the ID token')
if isinstance(payload['aud'], list) and self.aud not in payload['aud']:
payload_audiences = ', '.join(payload['aud'])
raise TokenValidationError('Audience (aud) claim mismatch in the ID token; expected "{}" but was not one of "{}"'.format(self.aud, payload_audiences))
elif isinstance(payload['aud'], str) and payload['aud'] != self.aud:
raise TokenValidationError('Audience (aud) claim mismatch in the ID token; expected "{}" but found "{}"'.format(self.aud, payload['aud']))
now = self._clock or time.time()
leeway = self.leeway
if 'exp' not in payload or not isinstance(payload['exp'], int):
raise TokenValidationError('Expiration Time (exp) claim must be a number present in the ID token')
exp_time = payload['exp'] + leeway
if now > exp_time:
raise TokenValidationError('Expiration Time (exp) claim error in the ID token; current time ({}) is after expiration time ({})'.format(now, exp_time))
if 'iat' not in payload or not isinstance(payload['iat'], int):
raise TokenValidationError('Issued At (iat) claim must be a number present in the ID token')
if nonce:
if 'nonce' not in payload or not isinstance(payload['nonce'], str):
raise TokenValidationError('Nonce (nonce) claim must be a string present in the ID token')
if payload['nonce'] != nonce:
raise TokenValidationError('Nonce (nonce) claim mismatch in the ID token; expected "{}", found "{}"'.format(nonce, payload['nonce']))
if organization:
if 'org_id' not in payload or not isinstance(payload['org_id'], str):
raise TokenValidationError('Organization (org_id) claim must be a string present in the ID token')
if payload['org_id'] != organization:
raise TokenValidationError('Organization (org_id) claim mismatch in the ID token; expected "{}", found "{}"'.format(organization, payload['org_id']))
if isinstance(payload['aud'], list) and len(payload['aud']) > 1:
if 'azp' not in payload or not isinstance(payload['azp'], str):
raise TokenValidationError('Authorized Party (azp) claim must be a string present in the ID token when Audience (aud) claim has multiple values')
if payload['azp'] != self.aud:
raise TokenValidationError('Authorized Party (azp) claim mismatch in the ID token; expected "{}", found "{}"'.format(self.aud, payload['azp']))
if max_age:
if 'auth_time' not in payload or not isinstance(payload['auth_time'], int):
raise TokenValidationError('Authentication Time (auth_time) claim must be a number present in the ID token when Max Age (max_age) is specified')
auth_valid_until = payload['auth_time'] + max_age + leeway
if now > auth_valid_until:
raise TokenValidationError('Authentication Time (auth_time) claim in the ID token indicates that too much time has passed since the last end-user authentication. Current time ({}) is after last auth at ({})'.format(now, auth_valid_until))
</DeepExtract>
return payload
|
def verify(self, token, nonce=None, max_age=None, organization=None):
"""Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.
Args:
token (str): The JWT to verify.
nonce (str, optional): The nonce value sent during authentication.
max_age (int, optional): The max_age value sent during authentication.
organization (str, optional): The expected organization ID (org_id) claim value. This should be specified
when logging in to an organization.
Returns:
the decoded payload from the token
Raises:
TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,
the token signature is invalid or the token has a claim missing or with unexpected value.
"""
if not token or not isinstance(token, str):
raise TokenValidationError('ID token is required but missing.')
payload = self._sv.verify_signature(token)
if 'iss' not in payload or not isinstance(payload['iss'], str):
raise TokenValidationError('Issuer (iss) claim must be a string present in the ID token')
if payload['iss'] != self.iss:
raise TokenValidationError('Issuer (iss) claim mismatch in the ID token; expected "{}", found "{}"'.format(self.iss, payload['iss']))
if 'sub' not in payload or not isinstance(payload['sub'], str):
raise TokenValidationError('Subject (sub) claim must be a string present in the ID token')
if 'aud' not in payload or not isinstance(payload['aud'], (str, list)):
raise TokenValidationError('Audience (aud) claim must be a string or array of strings present in the ID token')
if isinstance(payload['aud'], list) and self.aud not in payload['aud']:
payload_audiences = ', '.join(payload['aud'])
raise TokenValidationError('Audience (aud) claim mismatch in the ID token; expected "{}" but was not one of "{}"'.format(self.aud, payload_audiences))
elif isinstance(payload['aud'], str) and payload['aud'] != self.aud:
raise TokenValidationError('Audience (aud) claim mismatch in the ID token; expected "{}" but found "{}"'.format(self.aud, payload['aud']))
now = self._clock or time.time()
leeway = self.leeway
if 'exp' not in payload or not isinstance(payload['exp'], int):
raise TokenValidationError('Expiration Time (exp) claim must be a number present in the ID token')
exp_time = payload['exp'] + leeway
if now > exp_time:
raise TokenValidationError('Expiration Time (exp) claim error in the ID token; current time ({}) is after expiration time ({})'.format(now, exp_time))
if 'iat' not in payload or not isinstance(payload['iat'], int):
raise TokenValidationError('Issued At (iat) claim must be a number present in the ID token')
if nonce:
if 'nonce' not in payload or not isinstance(payload['nonce'], str):
raise TokenValidationError('Nonce (nonce) claim must be a string present in the ID token')
if payload['nonce'] != nonce:
raise TokenValidationError('Nonce (nonce) claim mismatch in the ID token; expected "{}", found "{}"'.format(nonce, payload['nonce']))
if organization:
if 'org_id' not in payload or not isinstance(payload['org_id'], str):
raise TokenValidationError('Organization (org_id) claim must be a string present in the ID token')
if payload['org_id'] != organization:
raise TokenValidationError('Organization (org_id) claim mismatch in the ID token; expected "{}", found "{}"'.format(organization, payload['org_id']))
if isinstance(payload['aud'], list) and len(payload['aud']) > 1:
if 'azp' not in payload or not isinstance(payload['azp'], str):
raise TokenValidationError('Authorized Party (azp) claim must be a string present in the ID token when Audience (aud) claim has multiple values')
if payload['azp'] != self.aud:
raise TokenValidationError('Authorized Party (azp) claim mismatch in the ID token; expected "{}", found "{}"'.format(self.aud, payload['azp']))
if max_age:
if 'auth_time' not in payload or not isinstance(payload['auth_time'], int):
raise TokenValidationError('Authentication Time (auth_time) claim must be a number present in the ID token when Max Age (max_age) is specified')
auth_valid_until = payload['auth_time'] + max_age + leeway
if now > auth_valid_until:
raise TokenValidationError('Authentication Time (auth_time) claim in the ID token indicates that too much time has passed since the last end-user authentication. Current time ({}) is after last auth at ({})'.format(now, auth_valid_until))
return payload
|
auth0-python
|
positive
|
def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
<DeepExtract>
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
(clone_loss, clone_grad) = (sum_loss, clone_grad)
</DeepExtract>
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
regularization_losses = None
total_loss = tf.add_n(clones_losses, name='total_loss')
<DeepExtract>
sum_grads = []
for grad_and_vars in zip(*grads_and_vars):
grads = []
var = grad_and_vars[0][1]
for (g, v) in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
grads_and_vars = sum_grads
</DeepExtract>
return (total_loss, grads_and_vars)
|
def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
(clone_loss, clone_grad) = (sum_loss, clone_grad)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
regularization_losses = None
total_loss = tf.add_n(clones_losses, name='total_loss')
sum_grads = []
for grad_and_vars in zip(*grads_and_vars):
grads = []
var = grad_and_vars[0][1]
for (g, v) in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
grads_and_vars = sum_grads
return (total_loss, grads_and_vars)
|
CVTron
|
positive
|
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos):
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
<DeepExtract>
difficultys = [0, 1, 2]
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0, min_overlaps, compute_aos)
mAP_bbox = get_mAP(ret['precision'])
mAP_bbox_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
mAP_aos = get_mAP(ret['orientation'])
mAP_aos_R40 = get_mAP_R40(ret['orientation'])
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1, min_overlaps)
mAP_bev = get_mAP(ret['precision'])
mAP_bev_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2, min_overlaps)
mAP_3d = get_mAP(ret['precision'])
mAP_3d_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
(mAP_bbox, mAP_bev, mAP_3d, mAP_aos) = (mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40)
</DeepExtract>
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return (mAP_bbox, mAP_bev, mAP_3d, mAP_aos)
|
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos):
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
difficultys = [0, 1, 2]
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0, min_overlaps, compute_aos)
mAP_bbox = get_mAP(ret['precision'])
mAP_bbox_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
mAP_aos = get_mAP(ret['orientation'])
mAP_aos_R40 = get_mAP_R40(ret['orientation'])
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1, min_overlaps)
mAP_bev = get_mAP(ret['precision'])
mAP_bev_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2, min_overlaps)
mAP_3d = get_mAP(ret['precision'])
mAP_3d_R40 = get_mAP_R40(ret['precision'])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
(mAP_bbox, mAP_bev, mAP_3d, mAP_aos) = (mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40)
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return (mAP_bbox, mAP_bev, mAP_3d, mAP_aos)
|
CaDDN
|
positive
|
@property
def attack_modifier(self):
<DeepExtract>
if not self.features_applied and self.wielder is not None:
self.features_applied = True
for f in self.wielder.features:
f.weapon_func(self)
</DeepExtract>
mod = self.attack_bonus
if self.wielder is not None:
mod += self.ability_mod
if self.wielder.is_proficient(self):
mod += self.wielder.proficiency_bonus
return mod
|
@property
def attack_modifier(self):
if not self.features_applied and self.wielder is not None:
self.features_applied = True
for f in self.wielder.features:
f.weapon_func(self)
mod = self.attack_bonus
if self.wielder is not None:
mod += self.ability_mod
if self.wielder.is_proficient(self):
mod += self.wielder.proficiency_bonus
return mod
|
dungeon-sheets
|
positive
|
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey('schema.LocalBookWithM2MThrough', CASCADE)
tag = ForeignKey('schema.TagM2MTest', CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass('TagM2MTest', related_name='books', through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
<DeepExtract>
with connection.cursor() as cursor:
columns = {d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description(cursor, LocalTagThrough._meta.db_table)}
for (name, (type, desc)) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
if not columns:
raise DatabaseError('Table does not exist (empty pragma)')
columns = columns
</DeepExtract>
self.assertEqual(columns['book_id'][0], connection.features.introspected_field_types['IntegerField'])
self.assertEqual(columns['tag_id'][0], connection.features.introspected_field_types['IntegerField'])
|
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey('schema.LocalBookWithM2MThrough', CASCADE)
tag = ForeignKey('schema.TagM2MTest', CASCADE)
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass('TagM2MTest', related_name='books', through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
with connection.cursor() as cursor:
columns = {d[0]: (connection.introspection.get_field_type(d[1], d), d) for d in connection.introspection.get_table_description(cursor, LocalTagThrough._meta.db_table)}
for (name, (type, desc)) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
if not columns:
raise DatabaseError('Table does not exist (empty pragma)')
columns = columns
self.assertEqual(columns['book_id'][0], connection.features.introspected_field_types['IntegerField'])
self.assertEqual(columns['tag_id'][0], connection.features.introspected_field_types['IntegerField'])
|
django-firebird
|
positive
|
def test_ok_when_preferred_setup(settings_with_register_verification, settings_with_register_email_verification, settings_with_reset_password_verification):
<DeepExtract>
app_configs = apps.app_configs
errors = []
all_checks = registry.get_checks(False)
rest_registration_checks = [check for check in all_checks if check.__module__.startswith('rest_registration.')]
for check in rest_registration_checks:
errors.extend(check(app_configs))
errors = errors
</DeepExtract>
<DeepExtract>
error_ids = sorted((e.id for e in errors))
expected_error_ids = sorted((code.get_full_code_id() for code in []))
msg = '\n\nList of errors:\n'
for error in errors:
msg += '- {error}\n'.format(error=error)
msg += ' does not match the codes: '
if []:
msg += ', '.join((str(e) for e in []))
else:
msg += '(empty list)'
assert error_ids == expected_error_ids, msg
</DeepExtract>
|
def test_ok_when_preferred_setup(settings_with_register_verification, settings_with_register_email_verification, settings_with_reset_password_verification):
app_configs = apps.app_configs
errors = []
all_checks = registry.get_checks(False)
rest_registration_checks = [check for check in all_checks if check.__module__.startswith('rest_registration.')]
for check in rest_registration_checks:
errors.extend(check(app_configs))
errors = errors
error_ids = sorted((e.id for e in errors))
expected_error_ids = sorted((code.get_full_code_id() for code in []))
msg = '\n\nList of errors:\n'
for error in errors:
msg += '- {error}\n'.format(error=error)
msg += ' does not match the codes: '
if []:
msg += ', '.join((str(e) for e in []))
else:
msg += '(empty list)'
assert error_ids == expected_error_ids, msg
|
django-rest-registration
|
positive
|
def test_perspective_chrono_input_keys(display):
<DeepExtract>
display._active_perspective = 5
perspective = display.perspectives[5]
</DeepExtract>
display._footer_window.getch = mock.MagicMock(return_value=10)
ret_val = perspective.handle_input(ord('h'))
assert ret_val
display._stdscr.reset_mock()
movement_keys = [display.KEY_MAPPING[Config['key_up']], display.KEY_MAPPING[Config['key_right']], display.KEY_MAPPING[Config['key_down']], display.KEY_MAPPING[Config['key_left']], display.KEY_MAPPING[Config['key_scroll_up']], display.KEY_MAPPING[Config['key_scroll_down']]]
for key in movement_keys:
perspective._metadata_updated = True
ret_val = perspective.handle_input(key)
assert ret_val
assert not perspective._metadata_updated
operation_keys = [display.KEY_MAPPING[Config['key_delete']], display.KEY_MAPPING[Config['key_remove']], display.KEY_MAPPING[Config['key_reload']], display.KEY_MAPPING[Config['key_reload_selected']], display.KEY_MAPPING[Config['key_save']], display.KEY_MAPPING[Config['key_play_selected']], display.KEY_MAPPING[Config['key_add_selected']], display.KEY_MAPPING[Config['key_clear']], display.KEY_MAPPING[Config['key_next']], display.KEY_MAPPING[Config['key_invert']], display.KEY_MAPPING[Config['key_pause_play']], display.KEY_MAPPING[Config['key_pause_play_alt']], display.KEY_MAPPING[Config['key_seek_forward']], display.KEY_MAPPING[Config['key_seek_forward_alt']], display.KEY_MAPPING[Config['key_seek_backward']], display.KEY_MAPPING[Config['key_seek_backward_alt']], display.KEY_MAPPING[Config['key_mark_played']], display.KEY_MAPPING[Config['key_rate_increase']], display.KEY_MAPPING[Config['key_rate_decrease']], display.KEY_MAPPING[Config['key_show_url']], display.KEY_MAPPING[Config['key_execute']]]
for key in operation_keys:
display._active_window = 0
assert perspective.handle_input(key)
ret_val = perspective.handle_input(ord('q'))
assert not ret_val
display._stdscr.reset_mock()
|
def test_perspective_chrono_input_keys(display):
display._active_perspective = 5
perspective = display.perspectives[5]
display._footer_window.getch = mock.MagicMock(return_value=10)
ret_val = perspective.handle_input(ord('h'))
assert ret_val
display._stdscr.reset_mock()
movement_keys = [display.KEY_MAPPING[Config['key_up']], display.KEY_MAPPING[Config['key_right']], display.KEY_MAPPING[Config['key_down']], display.KEY_MAPPING[Config['key_left']], display.KEY_MAPPING[Config['key_scroll_up']], display.KEY_MAPPING[Config['key_scroll_down']]]
for key in movement_keys:
perspective._metadata_updated = True
ret_val = perspective.handle_input(key)
assert ret_val
assert not perspective._metadata_updated
operation_keys = [display.KEY_MAPPING[Config['key_delete']], display.KEY_MAPPING[Config['key_remove']], display.KEY_MAPPING[Config['key_reload']], display.KEY_MAPPING[Config['key_reload_selected']], display.KEY_MAPPING[Config['key_save']], display.KEY_MAPPING[Config['key_play_selected']], display.KEY_MAPPING[Config['key_add_selected']], display.KEY_MAPPING[Config['key_clear']], display.KEY_MAPPING[Config['key_next']], display.KEY_MAPPING[Config['key_invert']], display.KEY_MAPPING[Config['key_pause_play']], display.KEY_MAPPING[Config['key_pause_play_alt']], display.KEY_MAPPING[Config['key_seek_forward']], display.KEY_MAPPING[Config['key_seek_forward_alt']], display.KEY_MAPPING[Config['key_seek_backward']], display.KEY_MAPPING[Config['key_seek_backward_alt']], display.KEY_MAPPING[Config['key_mark_played']], display.KEY_MAPPING[Config['key_rate_increase']], display.KEY_MAPPING[Config['key_rate_decrease']], display.KEY_MAPPING[Config['key_show_url']], display.KEY_MAPPING[Config['key_execute']]]
for key in operation_keys:
display._active_window = 0
assert perspective.handle_input(key)
ret_val = perspective.handle_input(ord('q'))
assert not ret_val
display._stdscr.reset_mock()
|
castero
|
positive
|
def _calc_lr_ARPACK(self, x, tmp, calc_l=False, A1=None, A2=None, rescale=True, tol=1e-14, ncv=None, nev=1, max_retries=4, which='LM'):
if A1 is None:
A1 = self.A
if A2 is None:
A2 = self.A
if ncv is None:
ncv = max(20, 2 * nev + 1)
symmetric = sp.all([A1[j] is A2[j] for j in range(len(A1))])
n = x.size
<DeepExtract>
if self.ev_arpack_CUDA:
from . import cuda_alternatives as tcu
opE = tcu.EOp_CUDA(A1, A2, calc_l, use_batch=self.D <= self.CUDA_batch_maxD)
else:
opE = EOp(A1, A2, calc_l)
opE = opE
</DeepExtract>
x *= n / la.norm(x.ravel())
v0 = x.ravel()
for i in range(max_retries):
if i > 0:
log.warning('_calc_lr_ARPACK: Retry #%u (%s)', i, 'l' if calc_l else 'r')
try:
(ev, eV) = las.eigs(opE, which=which, k=nev, v0=v0, tol=tol, ncv=ncv)
conv = True
ind = abs(ev).argmax()
ev = np.real_if_close(ev[ind])
ev = np.asscalar(ev)
eV = eV[:, ind]
if abs(ev) < 1e-12:
raise ValueError('Largest eigenvalue too small!')
if symmetric and np.imag(ev) != 0:
raise ValueError('Largest eigenvalue is not real (%g)! (ncv too small?)' % np.imag(ev))
break
except (las.ArpackNoConvergence, ValueError) as e:
log.warning('_calc_lr_ARPACK(nev=%u,ncv=%u): %s Try %u! (%s)', nev, ncv, e, i, 'l' if calc_l else 'r')
v0 = None
nev += 1
ncv += 5
if i == max_retries - 1:
log.error('_calc_lr_ARPACK(nev=%u,ncv=%u): Failed to converge! (%s)', nev, ncv, 'l' if calc_l else 'r')
raise EvoMPSNoConvergence('_calc_lr_ARPACK: Failed to converge!')
eVmean = eV.mean()
eV *= sp.sqrt(sp.conj(eVmean) / eVmean)
if eV.mean() < 0:
eV *= -1
eV = eV.reshape(self.D, self.D)
x[:] = eV
if rescale:
fac = (1 / sp.sqrt(ev)) ** (1.0 / len(A1))
for A in A1:
A *= fac
if self.sanity_checks:
if not symmetric:
log.warning('Sanity check failed: Re-scaling with A1 <> A2!')
tmp = opE.matvec(x.ravel())
ev = tmp.mean() / x.mean()
if not abs(ev - 1) < tol:
log.warning('Sanity check failed: Largest ev after re-scale = %s', ev)
return (x, conv, opE.calls, nev, ncv)
|
def _calc_lr_ARPACK(self, x, tmp, calc_l=False, A1=None, A2=None, rescale=True, tol=1e-14, ncv=None, nev=1, max_retries=4, which='LM'):
if A1 is None:
A1 = self.A
if A2 is None:
A2 = self.A
if ncv is None:
ncv = max(20, 2 * nev + 1)
symmetric = sp.all([A1[j] is A2[j] for j in range(len(A1))])
n = x.size
if self.ev_arpack_CUDA:
from . import cuda_alternatives as tcu
opE = tcu.EOp_CUDA(A1, A2, calc_l, use_batch=self.D <= self.CUDA_batch_maxD)
else:
opE = EOp(A1, A2, calc_l)
opE = opE
x *= n / la.norm(x.ravel())
v0 = x.ravel()
for i in range(max_retries):
if i > 0:
log.warning('_calc_lr_ARPACK: Retry #%u (%s)', i, 'l' if calc_l else 'r')
try:
(ev, eV) = las.eigs(opE, which=which, k=nev, v0=v0, tol=tol, ncv=ncv)
conv = True
ind = abs(ev).argmax()
ev = np.real_if_close(ev[ind])
ev = np.asscalar(ev)
eV = eV[:, ind]
if abs(ev) < 1e-12:
raise ValueError('Largest eigenvalue too small!')
if symmetric and np.imag(ev) != 0:
raise ValueError('Largest eigenvalue is not real (%g)! (ncv too small?)' % np.imag(ev))
break
except (las.ArpackNoConvergence, ValueError) as e:
log.warning('_calc_lr_ARPACK(nev=%u,ncv=%u): %s Try %u! (%s)', nev, ncv, e, i, 'l' if calc_l else 'r')
v0 = None
nev += 1
ncv += 5
if i == max_retries - 1:
log.error('_calc_lr_ARPACK(nev=%u,ncv=%u): Failed to converge! (%s)', nev, ncv, 'l' if calc_l else 'r')
raise EvoMPSNoConvergence('_calc_lr_ARPACK: Failed to converge!')
eVmean = eV.mean()
eV *= sp.sqrt(sp.conj(eVmean) / eVmean)
if eV.mean() < 0:
eV *= -1
eV = eV.reshape(self.D, self.D)
x[:] = eV
if rescale:
fac = (1 / sp.sqrt(ev)) ** (1.0 / len(A1))
for A in A1:
A *= fac
if self.sanity_checks:
if not symmetric:
log.warning('Sanity check failed: Re-scaling with A1 <> A2!')
tmp = opE.matvec(x.ravel())
ev = tmp.mean() / x.mean()
if not abs(ev - 1) < tol:
log.warning('Sanity check failed: Largest ev after re-scale = %s', ev)
return (x, conv, opE.calls, nev, ncv)
|
evoMPS
|
positive
|
def test_interpolate_1d_linear_single_value_tolerated(self):
"""1D linear interpolation. If tolerated, a single value input must be extrapolated to every real value.
"""
<DeepExtract>
if [2.0] is None:
[2.0] = self.x
if [4.0] is None:
[4.0] = self.data
self.interp_data = np.array([1.0, 0.827344627425, 0.65468925485, 0.482033882274, 0.19022089727, -0.135637119857, -0.461495136984, -0.671036971053, -0.787525858675, -0.904014746296, -0.907509439898, -0.685015745458, -0.462522051019, -0.229870470166, 0.084044201987, 0.397958874141, 0.711873546294, 0.796577712584, 0.852630565642, 0.908683418699, 0.751249583376, 0.487072403865, 0.222895224353, -0.052965755187, -0.343431484761, -0.633897214335, -0.858384419526, -0.851946789376, -0.845509159226, -0.839071529076], dtype=np.float64)
self.extrap_data_nea = np.array([1.0, 1.0, -0.839071529076, -0.839071529076], dtype=np.float64)
self.extrap_data_lin = np.array([1.4005604643743956, 1.2002802321871977, -0.831603878102657, -0.8241362271288615], dtype=np.float64)
self.interp_func = interpolators1d.Interpolate1DLinear([2.0], [4.0], extrapolate=extrapolate, extrapolation_range=extrapolation_range, extrapolation_type=extrapolation_type, tolerate_single_value=True)
</DeepExtract>
self.assertAlmostEqual(self.interp_func(-31946139.346), 4.0, delta=1e-08)
self.assertAlmostEqual(self.interp_func(31946139.346), 4.0, delta=1e-08)
self.assertAlmostEqual(self.interp_func(2.0), 4.0, delta=1e-08)
|
def test_interpolate_1d_linear_single_value_tolerated(self):
"""1D linear interpolation. If tolerated, a single value input must be extrapolated to every real value.
"""
if [2.0] is None:
[2.0] = self.x
if [4.0] is None:
[4.0] = self.data
self.interp_data = np.array([1.0, 0.827344627425, 0.65468925485, 0.482033882274, 0.19022089727, -0.135637119857, -0.461495136984, -0.671036971053, -0.787525858675, -0.904014746296, -0.907509439898, -0.685015745458, -0.462522051019, -0.229870470166, 0.084044201987, 0.397958874141, 0.711873546294, 0.796577712584, 0.852630565642, 0.908683418699, 0.751249583376, 0.487072403865, 0.222895224353, -0.052965755187, -0.343431484761, -0.633897214335, -0.858384419526, -0.851946789376, -0.845509159226, -0.839071529076], dtype=np.float64)
self.extrap_data_nea = np.array([1.0, 1.0, -0.839071529076, -0.839071529076], dtype=np.float64)
self.extrap_data_lin = np.array([1.4005604643743956, 1.2002802321871977, -0.831603878102657, -0.8241362271288615], dtype=np.float64)
self.interp_func = interpolators1d.Interpolate1DLinear([2.0], [4.0], extrapolate=extrapolate, extrapolation_range=extrapolation_range, extrapolation_type=extrapolation_type, tolerate_single_value=True)
self.assertAlmostEqual(self.interp_func(-31946139.346), 4.0, delta=1e-08)
self.assertAlmostEqual(self.interp_func(31946139.346), 4.0, delta=1e-08)
self.assertAlmostEqual(self.interp_func(2.0), 4.0, delta=1e-08)
|
core
|
positive
|
def do_timespans(command, settings):
(date_time, output_format) = convert_date_time(command.dateTime, settings)
original_date_time = date_time
for operand in command.operandList:
<DeepExtract>
delta_date_time = date_time
for timespan in operand.timeSpans:
delta_operand = relativedelta(seconds=int(timespan.amount) if timespan.span == 's' else 0, minutes=int(timespan.amount) if timespan.span == 'M' else 0, hours=int(timespan.amount) if timespan.span == 'h' else 0, days=int(timespan.amount) if timespan.span == 'd' else 0, weeks=int(timespan.amount) if timespan.span == 'w' else 0, months=int(timespan.amount) if timespan.span == 'm' else 0, years=int(timespan.amount) if timespan.span == 'y' else 0)
if operand.operator == '+':
delta_date_time += delta_operand
else:
delta_date_time -= delta_operand
date_time = delta_date_time
</DeepExtract>
<DeepExtract>
if not hasattr(command, 'exclusionCommands'):
date_time = date_time
exclusion_day_set = build_exclusion_day_set(command.exclusionCommands)
if len(exclusion_day_set) >= 7:
raise ExclusionNoDaysFoundError
starting_date_time = original_date_time
lookahead_date = date_time
lookahead_count = 0
extra_days = calculate_rrule_exclusions(starting_date_time, lookahead_date, command.exclusionCommands, settings)
while extra_days > 0:
starting_date_time = lookahead_date
lookahead_date = lookahead_date + timedelta(days=extra_days)
lookahead_count = lookahead_count + 1
if lookahead_count >= MAX_LOOKAHEAD_ATTEMPTS:
raise ExclusionTooFarAheadError
extra_days = calculate_rrule_exclusions(starting_date_time, lookahead_date, command.exclusionCommands, settings)
date_time = lookahead_date
</DeepExtract>
return date_time.strftime(output_format)
|
def do_timespans(command, settings):
(date_time, output_format) = convert_date_time(command.dateTime, settings)
original_date_time = date_time
for operand in command.operandList:
delta_date_time = date_time
for timespan in operand.timeSpans:
delta_operand = relativedelta(seconds=int(timespan.amount) if timespan.span == 's' else 0, minutes=int(timespan.amount) if timespan.span == 'M' else 0, hours=int(timespan.amount) if timespan.span == 'h' else 0, days=int(timespan.amount) if timespan.span == 'd' else 0, weeks=int(timespan.amount) if timespan.span == 'w' else 0, months=int(timespan.amount) if timespan.span == 'm' else 0, years=int(timespan.amount) if timespan.span == 'y' else 0)
if operand.operator == '+':
delta_date_time += delta_operand
else:
delta_date_time -= delta_operand
date_time = delta_date_time
if not hasattr(command, 'exclusionCommands'):
date_time = date_time
exclusion_day_set = build_exclusion_day_set(command.exclusionCommands)
if len(exclusion_day_set) >= 7:
raise ExclusionNoDaysFoundError
starting_date_time = original_date_time
lookahead_date = date_time
lookahead_count = 0
extra_days = calculate_rrule_exclusions(starting_date_time, lookahead_date, command.exclusionCommands, settings)
while extra_days > 0:
starting_date_time = lookahead_date
lookahead_date = lookahead_date + timedelta(days=extra_days)
lookahead_count = lookahead_count + 1
if lookahead_count >= MAX_LOOKAHEAD_ATTEMPTS:
raise ExclusionTooFarAheadError
extra_days = calculate_rrule_exclusions(starting_date_time, lookahead_date, command.exclusionCommands, settings)
date_time = lookahead_date
return date_time.strftime(output_format)
|
Alfred-Workflows-DateCalculator
|
positive
|
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
print()
print('_' * 79)
if self.headline:
print(f' *** {self.headline} ***')
for (no, q) in enumerate(self.captured_queries, 1):
<DeepExtract>
q['sql'] = q['sql'].replace("u'", "'").replace('u"', '"')
q['sql'] = q['sql'].replace('`', '')
for keyword in PFORMAT_SQL_KEYWORDS:
q['sql'] = q['sql'].replace(f' {keyword} ', f'\n\t{keyword} ')
q['sql'] = smart_str(q['sql'])
</DeepExtract>
msg = smart_str(f'{no:d} - {sql}\n')
print(msg)
print('-' * 79)
|
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
print()
print('_' * 79)
if self.headline:
print(f' *** {self.headline} ***')
for (no, q) in enumerate(self.captured_queries, 1):
q['sql'] = q['sql'].replace("u'", "'").replace('u"', '"')
q['sql'] = q['sql'].replace('`', '')
for keyword in PFORMAT_SQL_KEYWORDS:
q['sql'] = q['sql'].replace(f' {keyword} ', f'\n\t{keyword} ')
q['sql'] = smart_str(q['sql'])
msg = smart_str(f'{no:d} - {sql}\n')
print(msg)
print('-' * 79)
|
django-tools
|
positive
|
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
<DeepExtract>
(att_feats, att_masks) = self.clip_att(att_feats, att_masks)
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
p_att_feats = self.ctx2att(att_feats)
(p_fc_feats, p_att_feats, pp_att_feats, p_att_masks) = (fc_feats, att_feats, p_att_feats, att_masks)
</DeepExtract>
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size * sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.seq_length, self.vocab_size + 1)
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
<DeepExtract>
weight = self.logit.weight if hasattr(self.logit, 'weight') else self.logit[0].weight
state = (weight.new_zeros(self.num_layers, beam_size, self.rnn_size), weight.new_zeros(self.num_layers, beam_size, self.rnn_size))
</DeepExtract>
(tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks) = utils.repeat_tensors(beam_size, [p_fc_feats[k:k + 1], p_att_feats[k:k + 1], pp_att_feats[k:k + 1], p_att_masks[k:k + 1] if att_masks is not None else None])
for t in range(1):
if t == 0:
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
<DeepExtract>
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, show_gate_labels, task)
else:
(output, state, word_box_attn) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
if task == 'caption':
(logprobs, state) = (logprobs, state, word_box_attn)
else:
(logprobs, state) = (logprobs, state)
elif task == 'both':
(output, state, output_trace) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state, self.model.generator_trace(output_trace))
</DeepExtract>
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k * sample_n + _n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k * sample_n + _n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq']
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
return (seq, seqLogprobs)
|
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
(att_feats, att_masks) = self.clip_att(att_feats, att_masks)
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
p_att_feats = self.ctx2att(att_feats)
(p_fc_feats, p_att_feats, pp_att_feats, p_att_masks) = (fc_feats, att_feats, p_att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size * sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size * sample_n, self.seq_length, self.vocab_size + 1)
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
weight = self.logit.weight if hasattr(self.logit, 'weight') else self.logit[0].weight
state = (weight.new_zeros(self.num_layers, beam_size, self.rnn_size), weight.new_zeros(self.num_layers, beam_size, self.rnn_size))
(tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks) = utils.repeat_tensors(beam_size, [p_fc_feats[k:k + 1], p_att_feats[k:k + 1], pp_att_feats[k:k + 1], p_att_masks[k:k + 1] if att_masks is not None else None])
for t in range(1):
if t == 0:
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
(output, state) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, show_gate_labels, task)
else:
(output, state, word_box_attn) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
if task == 'caption':
(logprobs, state) = (logprobs, state, word_box_attn)
else:
(logprobs, state) = (logprobs, state)
elif task == 'both':
(output, state, output_trace) = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks, state, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
(logprobs, state) = (logprobs, state, self.model.generator_trace(output_trace))
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k * sample_n + _n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k * sample_n + _n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq']
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
return (seq, seqLogprobs)
|
connect-caption-and-trace
|
positive
|
def test_RS256_token_signature_passes(self):
token = 'eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL3Rva2Vucy10ZXN0LmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHwxMjM0NTY3ODkiLCJhdWQiOlsidG9rZW5zLXRlc3QtMTIzIiwiZXh0ZXJuYWwtdGVzdC05OTkiXSwiZXhwIjoxNTg3NzY1MzYxLCJpYXQiOjE1ODc1OTI1NjEsIm5vbmNlIjoiYTFiMmMzZDRlNSIsImF6cCI6InRva2Vucy10ZXN0LTEyMyIsImF1dGhfdGltZSI6MTU4NzY3ODk2MX0.Eo2jxdlrKmutIeyn9Un6VHorMJaCL5txPDCC3QiAQn0pYYnrRU7VMQwqbTiXLQ9zPYh5Q4pQmT-XRaGL-HwDH8vCUieVJKOm0-gNFAMzx1i8sRH1ubw75sn69y09AQKcitYtjnBmahgfZrswtsxOXM7XovlLftPjv6goAi_U38GYsS_V_zOBvdbX2cM5zdooJAC0e7vlCr3bXNo90qwgCuezvCGt1ZrgWyDNO9oMzK-TlK86q36LuIkux7XZboF5rc3zsThEce_tPufA5qoEa-7I_ybmjwlvOCWmngYLT52_S2CbHeRNarePMjZIlmAuG-DcetwO8jJsX84Ra0SdUw'
<DeepExtract>
verifier = AsymmetricSignatureVerifier('some URL')
verifier._fetch_key = MagicMock('_fetch_key')
verifier._fetch_key.return_value = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(RSA_PUB_KEY_1_JWK))
sv = verifier
</DeepExtract>
tv = TokenVerifier(signature_verifier=sv, issuer=expectations['issuer'], audience=expectations['audience'])
tv._clock = MOCKED_CLOCK
tv.verify(token)
|
def test_RS256_token_signature_passes(self):
token = 'eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL3Rva2Vucy10ZXN0LmF1dGgwLmNvbS8iLCJzdWIiOiJhdXRoMHwxMjM0NTY3ODkiLCJhdWQiOlsidG9rZW5zLXRlc3QtMTIzIiwiZXh0ZXJuYWwtdGVzdC05OTkiXSwiZXhwIjoxNTg3NzY1MzYxLCJpYXQiOjE1ODc1OTI1NjEsIm5vbmNlIjoiYTFiMmMzZDRlNSIsImF6cCI6InRva2Vucy10ZXN0LTEyMyIsImF1dGhfdGltZSI6MTU4NzY3ODk2MX0.Eo2jxdlrKmutIeyn9Un6VHorMJaCL5txPDCC3QiAQn0pYYnrRU7VMQwqbTiXLQ9zPYh5Q4pQmT-XRaGL-HwDH8vCUieVJKOm0-gNFAMzx1i8sRH1ubw75sn69y09AQKcitYtjnBmahgfZrswtsxOXM7XovlLftPjv6goAi_U38GYsS_V_zOBvdbX2cM5zdooJAC0e7vlCr3bXNo90qwgCuezvCGt1ZrgWyDNO9oMzK-TlK86q36LuIkux7XZboF5rc3zsThEce_tPufA5qoEa-7I_ybmjwlvOCWmngYLT52_S2CbHeRNarePMjZIlmAuG-DcetwO8jJsX84Ra0SdUw'
verifier = AsymmetricSignatureVerifier('some URL')
verifier._fetch_key = MagicMock('_fetch_key')
verifier._fetch_key.return_value = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(RSA_PUB_KEY_1_JWK))
sv = verifier
tv = TokenVerifier(signature_verifier=sv, issuer=expectations['issuer'], audience=expectations['audience'])
tv._clock = MOCKED_CLOCK
tv.verify(token)
|
auth0-python
|
positive
|
@staticmethod
def process_target_dnsmasq():
cmd = ['%s' % KernelVirtualMachinePlayer.rkt_bin, '--local-config=%s' % KernelVirtualMachinePlayer.tests_path, '--mount', 'volume=config,target=/etc/dnsmasq.conf', '--mount', 'volume=resolv,target=/etc/resolv.conf', 'run', 'enjoliver.local/dnsmasq:latest', '--insecure-options=all', '--net=host', '--interactive', '--caps-retain=all', '--set-env=TERM=%s' % os.getenv('TERM', 'xterm'), '--uuid-file-save=/tmp/dnsmasq.uuid', '--volume', 'resolv,kind=host,source=/etc/resolv.conf', '--volume', 'config,kind=host,source=%s/dnsmasq-rack0.conf' % KernelVirtualMachinePlayer.tests_path]
<DeepExtract>
for i in range(5):
try:
print('PID -> %s\nexec -> %s' % (os.getpid(), ' '.join(cmd)))
return
except BlockingIOError:
time.sleep(0.01)
</DeepExtract>
sys.stdout.flush()
os.execve(cmd[0], cmd, os.environ)
os._exit(2)
|
@staticmethod
def process_target_dnsmasq():
cmd = ['%s' % KernelVirtualMachinePlayer.rkt_bin, '--local-config=%s' % KernelVirtualMachinePlayer.tests_path, '--mount', 'volume=config,target=/etc/dnsmasq.conf', '--mount', 'volume=resolv,target=/etc/resolv.conf', 'run', 'enjoliver.local/dnsmasq:latest', '--insecure-options=all', '--net=host', '--interactive', '--caps-retain=all', '--set-env=TERM=%s' % os.getenv('TERM', 'xterm'), '--uuid-file-save=/tmp/dnsmasq.uuid', '--volume', 'resolv,kind=host,source=/etc/resolv.conf', '--volume', 'config,kind=host,source=%s/dnsmasq-rack0.conf' % KernelVirtualMachinePlayer.tests_path]
for i in range(5):
try:
print('PID -> %s\nexec -> %s' % (os.getpid(), ' '.join(cmd)))
return
except BlockingIOError:
time.sleep(0.01)
sys.stdout.flush()
os.execve(cmd[0], cmd, os.environ)
os._exit(2)
|
enjoliver
|
positive
|
def get(self, aggregate_id: UUID, version: Optional[int]=None, projector_func: ProjectorFunction[TMutableOrImmutableAggregate, TDomainEvent]=project_aggregate, fastforward_skipping: bool=False, deepcopy_from_cache: bool=True) -> TMutableOrImmutableAggregate:
"""
Reconstructs an :class:`~eventsourcing.domain.Aggregate` for a
given ID from stored events, optionally at a particular version.
"""
if self.cache and version is None:
try:
aggregate = cast(TMutableOrImmutableAggregate, self.cache.get(aggregate_id))
except KeyError:
<DeepExtract>
gt: Optional[int] = None
if self.snapshot_store is not None:
snapshots = list(self.snapshot_store.get(originator_id=aggregate_id, desc=True, limit=1, lte=None))
if snapshots:
gt = snapshots[0].originator_version
else:
snapshots = []
aggregate_events = self.event_store.get(originator_id=aggregate_id, gt=gt, lte=None)
initial: Optional[TMutableOrImmutableAggregate] = None
aggregate = projector_func(initial, chain(cast(Iterable[TDomainEvent], snapshots), cast(Iterable[TDomainEvent], aggregate_events)))
if aggregate is None:
raise AggregateNotFound((aggregate_id, None))
else:
aggregate = aggregate
</DeepExtract>
self.cache.put(aggregate_id, aggregate)
else:
if self.fastforward:
<DeepExtract>
with self._fastforward_locks_lock:
try:
(lock, num_users) = self._fastforward_locks_inuse[aggregate_id]
except KeyError:
try:
lock = self._fastforward_locks_cache.get(aggregate_id, evict=True)
except KeyError:
lock = Lock()
finally:
num_users = 0
finally:
num_users += 1
self._fastforward_locks_inuse[aggregate_id] = (lock, num_users)
fastforward_lock = lock
</DeepExtract>
blocking = not (fastforward_skipping or self.fastforward_skipping)
try:
if fastforward_lock.acquire(blocking=blocking):
try:
new_events = self.event_store.get(originator_id=aggregate_id, gt=aggregate.version)
_aggregate = projector_func(aggregate, cast(Iterable[TDomainEvent], new_events))
if _aggregate is None:
raise AggregateNotFound(aggregate_id)
else:
aggregate = _aggregate
finally:
fastforward_lock.release()
finally:
<DeepExtract>
with self._fastforward_locks_lock:
(lock_, num_users) = self._fastforward_locks_inuse[aggregate_id]
num_users -= 1
if num_users == 0:
del self._fastforward_locks_inuse[aggregate_id]
self._fastforward_locks_cache.put(aggregate_id, lock_)
else:
self._fastforward_locks_inuse[aggregate_id] = (lock_, num_users)
</DeepExtract>
if deepcopy_from_cache and self.deepcopy_from_cache:
aggregate = deepcopy(aggregate)
else:
<DeepExtract>
gt: Optional[int] = None
if self.snapshot_store is not None:
snapshots = list(self.snapshot_store.get(originator_id=aggregate_id, desc=True, limit=1, lte=version))
if snapshots:
gt = snapshots[0].originator_version
else:
snapshots = []
aggregate_events = self.event_store.get(originator_id=aggregate_id, gt=gt, lte=version)
initial: Optional[TMutableOrImmutableAggregate] = None
aggregate = projector_func(initial, chain(cast(Iterable[TDomainEvent], snapshots), cast(Iterable[TDomainEvent], aggregate_events)))
if aggregate is None:
raise AggregateNotFound((aggregate_id, version))
else:
aggregate = aggregate
</DeepExtract>
return aggregate
|
def get(self, aggregate_id: UUID, version: Optional[int]=None, projector_func: ProjectorFunction[TMutableOrImmutableAggregate, TDomainEvent]=project_aggregate, fastforward_skipping: bool=False, deepcopy_from_cache: bool=True) -> TMutableOrImmutableAggregate:
"""
Reconstructs an :class:`~eventsourcing.domain.Aggregate` for a
given ID from stored events, optionally at a particular version.
"""
if self.cache and version is None:
try:
aggregate = cast(TMutableOrImmutableAggregate, self.cache.get(aggregate_id))
except KeyError:
gt: Optional[int] = None
if self.snapshot_store is not None:
snapshots = list(self.snapshot_store.get(originator_id=aggregate_id, desc=True, limit=1, lte=None))
if snapshots:
gt = snapshots[0].originator_version
else:
snapshots = []
aggregate_events = self.event_store.get(originator_id=aggregate_id, gt=gt, lte=None)
initial: Optional[TMutableOrImmutableAggregate] = None
aggregate = projector_func(initial, chain(cast(Iterable[TDomainEvent], snapshots), cast(Iterable[TDomainEvent], aggregate_events)))
if aggregate is None:
raise AggregateNotFound((aggregate_id, None))
else:
aggregate = aggregate
self.cache.put(aggregate_id, aggregate)
else:
if self.fastforward:
with self._fastforward_locks_lock:
try:
(lock, num_users) = self._fastforward_locks_inuse[aggregate_id]
except KeyError:
try:
lock = self._fastforward_locks_cache.get(aggregate_id, evict=True)
except KeyError:
lock = Lock()
finally:
num_users = 0
finally:
num_users += 1
self._fastforward_locks_inuse[aggregate_id] = (lock, num_users)
fastforward_lock = lock
blocking = not (fastforward_skipping or self.fastforward_skipping)
try:
if fastforward_lock.acquire(blocking=blocking):
try:
new_events = self.event_store.get(originator_id=aggregate_id, gt=aggregate.version)
_aggregate = projector_func(aggregate, cast(Iterable[TDomainEvent], new_events))
if _aggregate is None:
raise AggregateNotFound(aggregate_id)
else:
aggregate = _aggregate
finally:
fastforward_lock.release()
finally:
with self._fastforward_locks_lock:
(lock_, num_users) = self._fastforward_locks_inuse[aggregate_id]
num_users -= 1
if num_users == 0:
del self._fastforward_locks_inuse[aggregate_id]
self._fastforward_locks_cache.put(aggregate_id, lock_)
else:
self._fastforward_locks_inuse[aggregate_id] = (lock_, num_users)
if deepcopy_from_cache and self.deepcopy_from_cache:
aggregate = deepcopy(aggregate)
else:
gt: Optional[int] = None
if self.snapshot_store is not None:
snapshots = list(self.snapshot_store.get(originator_id=aggregate_id, desc=True, limit=1, lte=version))
if snapshots:
gt = snapshots[0].originator_version
else:
snapshots = []
aggregate_events = self.event_store.get(originator_id=aggregate_id, gt=gt, lte=version)
initial: Optional[TMutableOrImmutableAggregate] = None
aggregate = projector_func(initial, chain(cast(Iterable[TDomainEvent], snapshots), cast(Iterable[TDomainEvent], aggregate_events)))
if aggregate is None:
raise AggregateNotFound((aggregate_id, version))
else:
aggregate = aggregate
return aggregate
|
eventsourcing
|
positive
|
@responses.activate
def test_get_env_cloud_details_mapping_google():
metadata = {'attributes': {}, 'cpuPlatform': 'Intel Haswell', 'zone': 'projects/253764845563/zones/us-central1-a'}
<DeepExtract>
for (provider, params) in CLOUD_METADATA_MAPPING.items():
if provider == 'GCP':
responses.add(responses.GET, params['url'], json=metadata, status=200)
else:
responses.add(responses.GET, params['url'], status=404)
</DeepExtract>
expected_metadata = metadata
del expected_metadata['attributes']
assert get_env_cloud_details() == {'provider': 'GCP', 'metadata': metadata}
|
@responses.activate
def test_get_env_cloud_details_mapping_google():
metadata = {'attributes': {}, 'cpuPlatform': 'Intel Haswell', 'zone': 'projects/253764845563/zones/us-central1-a'}
for (provider, params) in CLOUD_METADATA_MAPPING.items():
if provider == 'GCP':
responses.add(responses.GET, params['url'], json=metadata, status=200)
else:
responses.add(responses.GET, params['url'], status=404)
expected_metadata = metadata
del expected_metadata['attributes']
assert get_env_cloud_details() == {'provider': 'GCP', 'metadata': metadata}
|
codecarbon
|
positive
|
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
max_num_tokens = max_seq_length - 3
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
<DeepExtract>
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
</DeepExtract>
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
<DeepExtract>
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == '[CLS]' or token == '[SEP]':
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
if rng.random() < 0.8:
masked_token = '[MASK]'
elif rng.random() < 0.5:
masked_token = tokens[index]
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
(tokens, masked_lm_positions, masked_lm_labels) = (output_tokens, masked_lm_positions, masked_lm_labels)
</DeepExtract>
instance = TrainingInstance(tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
|
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
max_num_tokens = max_seq_length - 3
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
for _ in range(10):
random_document_index = rng.randint(0, len(all_documents) - 1)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == '[CLS]' or token == '[SEP]':
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
if rng.random() < 0.8:
masked_token = '[MASK]'
elif rng.random() < 0.5:
masked_token = tokens[index]
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
(tokens, masked_lm_positions, masked_lm_labels) = (output_tokens, masked_lm_positions, masked_lm_labels)
instance = TrainingInstance(tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
|
bluebert
|
positive
|
def test_208_centos8_httpd_user_dockerfile(self):
""" WHEN using a dockerfile for systemd-enabled CentOS 8 and python3,
THEN we can create an image with an Apache HTTP service
being installed and enabled.
AND in this variant it runs under User=httpd right
there from PID-1 started implicity in --user mode.
THEN it succeeds if modified"""
if not os.path.exists(DOCKER_SOCKET):
self.skipTest('docker-based test')
docker = _docker
curl = _curl
python = _python or _python3
if not python.endswith('python3'):
self.skipTest('using python3 on centos:8')
<DeepExtract>
name = self.caller_testname()
if suffix:
testname = name + '_' + suffix
testname = name
</DeepExtract>
<DeepExtract>
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
os.makedirs(newdir)
testdir = newdir
</DeepExtract>
name = 'centos8-httpd'
dockerfile = 'centos8-httpd-user.dockerfile'
<DeepExtract>
image = ''
for line in open(dockerfile):
m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line)
if m:
image = m.group(1)
break
m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line)
if m:
image = m.group(1).strip()
break
logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image)
if image:
addhosts = self.start_mirror(image, extras)
addhosts = ''
</DeepExtract>
<DeepExtract>
savename = os.path.splitext(os.path.basename(dockerfile))[0]
</DeepExtract>
saveto = SAVETO
images = IMAGES
cmd = '{docker} build . -f {dockerfile} {addhosts} --tag {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rm --force {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} run -d --name {testname} {images}:{testname} sleep 300'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} exec {testname} systemctl start httpd --user'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = run.communicate()
(out, err, end) = (decodes(out), decodes(err), run.returncode)
</DeepExtract>
logg.info(' %s =>%s\n%s\n%s', cmd, end, out, err)
self.assertEqual(end, 0)
cmd = '{docker} rm -f {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} run -d --name {testname} {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
<DeepExtract>
docker = _docker
cmd = '{docker} inspect {name}'
values = output(cmd.format(**locals()))
values = json.loads(values)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', testname, values)
container = values[0]['NetworkSettings']['IPAddress']
</DeepExtract>
cmd = 'sleep 5; {curl} -o {testdir}/{testname}.txt http://{container}:8080'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = 'grep OK {testdir}/{testname}.txt'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} exec {testname} ps axu'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE)
(out, err) = run.communicate()
(out, end) = (decodes(out), run.returncode)
</DeepExtract>
logg.info(' %s =>%s\n%s', cmd, end, out)
self.assertTrue(greps(out, 'apache.*python.*systemctl'))
self.assertFalse(greps(out, 'root'))
cmd = '{docker} stop {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rm --force {testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rmi {saveto}/{savename}:latest'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
</DeepExtract>
cmd = '{docker} rmi {images}:{testname}'
<DeepExtract>
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
</DeepExtract>
<DeepExtract>
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
return newdir
</DeepExtract>
|
def test_208_centos8_httpd_user_dockerfile(self):
""" WHEN using a dockerfile for systemd-enabled CentOS 8 and python3,
THEN we can create an image with an Apache HTTP service
being installed and enabled.
AND in this variant it runs under User=httpd right
there from PID-1 started implicity in --user mode.
THEN it succeeds if modified"""
if not os.path.exists(DOCKER_SOCKET):
self.skipTest('docker-based test')
docker = _docker
curl = _curl
python = _python or _python3
if not python.endswith('python3'):
self.skipTest('using python3 on centos:8')
name = self.caller_testname()
if suffix:
testname = name + '_' + suffix
testname = name
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
os.makedirs(newdir)
testdir = newdir
name = 'centos8-httpd'
dockerfile = 'centos8-httpd-user.dockerfile'
image = ''
for line in open(dockerfile):
m = re.match('[Ff][Rr][Oo][Mm] *"([^"]*)"', line)
if m:
image = m.group(1)
break
m = re.match('[Ff][Rr][Oo][Mm] *(\\w[^ ]*)', line)
if m:
image = m.group(1).strip()
break
logg.debug("--\n-- '%s' FROM '%s'", dockerfile, image)
if image:
addhosts = self.start_mirror(image, extras)
addhosts = ''
savename = os.path.splitext(os.path.basename(dockerfile))[0]
saveto = SAVETO
images = IMAGES
cmd = '{docker} build . -f {dockerfile} {addhosts} --tag {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rm --force {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
cmd = '{docker} run -d --name {testname} {images}:{testname} sleep 300'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} exec {testname} systemctl start httpd --user'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = run.communicate()
(out, err, end) = (decodes(out), decodes(err), run.returncode)
logg.info(' %s =>%s\n%s\n%s', cmd, end, out, err)
self.assertEqual(end, 0)
cmd = '{docker} rm -f {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} run -d --name {testname} {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
docker = _docker
cmd = '{docker} inspect {name}'
values = output(cmd.format(**locals()))
values = json.loads(values)
if not values or 'NetworkSettings' not in values[0]:
logg.critical(' docker inspect %s => %s ', testname, values)
container = values[0]['NetworkSettings']['IPAddress']
cmd = 'sleep 5; {curl} -o {testdir}/{testname}.txt http://{container}:8080'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = 'grep OK {testdir}/{testname}.txt'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} exec {testname} ps axu'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
run = subprocess.Popen(cmd.format(**locals()), shell=shell, stdout=subprocess.PIPE)
(out, err) = run.communicate()
(out, end) = (decodes(out), run.returncode)
logg.info(' %s =>%s\n%s', cmd, end, out)
self.assertTrue(greps(out, 'apache.*python.*systemctl'))
self.assertFalse(greps(out, 'root'))
cmd = '{docker} stop {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rm --force {testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rmi {saveto}/{savename}:latest'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
cmd = '{docker} tag {images}:{testname} {saveto}/{savename}:latest'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.check_call(cmd.format(**locals()), shell=shell)
cmd = '{docker} rmi {images}:{testname}'
if isinstance(cmd.format(**locals()), basestring):
logg.info(': %s', cmd.format(**locals()))
else:
logg.info(': %s', ' '.join(["'%s'" % item for item in cmd.format(**locals())]))
return subprocess.call(cmd.format(**locals()), shell=shell)
testname = testname or self.caller_testname()
newdir = 'tmp/tmp.' + testname
if os.path.isdir(newdir):
shutil.rmtree(newdir)
return newdir
|
docker-systemctl-images
|
positive
|
def format_db(self, db_fasta_file, db_name):
<DeepExtract>
raise NotImplementedError
</DeepExtract>
<DeepExtract>
self.logger.debug('Running command "{0}" ({1})'.format(' '.join(cmd), self.__class__.__name__))
if False:
cmd = ' '.join(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
(out, err) = p.communicate()
if p.returncode != 0 and err:
self.logger.error('Process aborted due to an error in {self.__class__.__name__}.'.format(self=self))
self.logger.error(err.decode('utf8'))
exit(1)
return (out, err)
</DeepExtract>
|
def format_db(self, db_fasta_file, db_name):
raise NotImplementedError
self.logger.debug('Running command "{0}" ({1})'.format(' '.join(cmd), self.__class__.__name__))
if False:
cmd = ' '.join(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)
(out, err) = p.communicate()
if p.returncode != 0 and err:
self.logger.error('Process aborted due to an error in {self.__class__.__name__}.'.format(self=self))
self.logger.error(err.decode('utf8'))
exit(1)
return (out, err)
|
dfast_core
|
positive
|
def choose(self, do, y_pred, sigma, return_values=False):
if not self.init:
<DeepExtract>
X_samples = np.array(do.X_samples)
epoch_idx = do.params.index('nepochs')
max_epoch = np.max(X_samples[:, epoch_idx])
while True:
for (idx, x) in enumerate(do.X_samples):
if x[epoch_idx] == max_epoch:
if return_values:
yield (idx, np.zeros(len(y_pred)))
else:
yield idx
</DeepExtract>
self.init = True
return self.gen.next()
|
def choose(self, do, y_pred, sigma, return_values=False):
if not self.init:
X_samples = np.array(do.X_samples)
epoch_idx = do.params.index('nepochs')
max_epoch = np.max(X_samples[:, epoch_idx])
while True:
for (idx, x) in enumerate(do.X_samples):
if x[epoch_idx] == max_epoch:
if return_values:
yield (idx, np.zeros(len(y_pred)))
else:
yield idx
self.init = True
return self.gen.next()
|
ddnn
|
positive
|
def pre_merge_netag(self, instance):
ne_spans = instance.get_ne_span(PRE_MERGE_NETAG)
for (ne_id, span) in ne_spans.items():
for sp_id in span:
if sp_id != ne_id and ne_id in self.nodes and (sp_id in self.nodes):
<DeepExtract>
tmp1 = ne_id
tmp2 = sp_id
ne_id = tmp1 if tmp1 < tmp2 else tmp2
sp_id = tmp2 if tmp1 < tmp2 else tmp1
self.nodes[ne_id].end = self.nodes[sp_id].end if self.nodes[ne_id].end < self.nodes[sp_id].end else self.nodes[ne_id].end
self.nodes[ne_id].words.extend(self.nodes[sp_id].words)
for p in self.nodes[sp_id].parents[:]:
if p != ne_id and p not in self.nodes[ne_id].parents:
edge_label = self.get_edge_label(p, sp_id)
self.add_edge(p, ne_id, edge_label)
for c in self.nodes[sp_id].children[:]:
if c != ne_id and c not in self.nodes[ne_id].children:
edge_label = self.get_edge_label(sp_id, c)
self.add_edge(ne_id, c, edge_label)
self.nodes[ne_id].SWAPPED = False
self.nodes[ne_id].incoming_traces = self.nodes[ne_id].incoming_traces | self.nodes[sp_id].incoming_traces
self.remove_node(sp_id)
</DeepExtract>
if sp_id in self.multi_roots:
self.multi_roots.remove(sp_id)
|
def pre_merge_netag(self, instance):
ne_spans = instance.get_ne_span(PRE_MERGE_NETAG)
for (ne_id, span) in ne_spans.items():
for sp_id in span:
if sp_id != ne_id and ne_id in self.nodes and (sp_id in self.nodes):
tmp1 = ne_id
tmp2 = sp_id
ne_id = tmp1 if tmp1 < tmp2 else tmp2
sp_id = tmp2 if tmp1 < tmp2 else tmp1
self.nodes[ne_id].end = self.nodes[sp_id].end if self.nodes[ne_id].end < self.nodes[sp_id].end else self.nodes[ne_id].end
self.nodes[ne_id].words.extend(self.nodes[sp_id].words)
for p in self.nodes[sp_id].parents[:]:
if p != ne_id and p not in self.nodes[ne_id].parents:
edge_label = self.get_edge_label(p, sp_id)
self.add_edge(p, ne_id, edge_label)
for c in self.nodes[sp_id].children[:]:
if c != ne_id and c not in self.nodes[ne_id].children:
edge_label = self.get_edge_label(sp_id, c)
self.add_edge(ne_id, c, edge_label)
self.nodes[ne_id].SWAPPED = False
self.nodes[ne_id].incoming_traces = self.nodes[ne_id].incoming_traces | self.nodes[sp_id].incoming_traces
self.remove_node(sp_id)
if sp_id in self.multi_roots:
self.multi_roots.remove(sp_id)
|
camr
|
positive
|
def __setitem__(self, key, value):
"""Set a metadata tag.
If the file has no tags, an appropriate format is added (but
not written until save is called).
"""
if self.tags is None:
<DeepExtract>
raise NotImplementedError
</DeepExtract>
self.tags[key] = value
|
def __setitem__(self, key, value):
"""Set a metadata tag.
If the file has no tags, an appropriate format is added (but
not written until save is called).
"""
if self.tags is None:
raise NotImplementedError
self.tags[key] = value
|
AvalonXmlAgent.bundle
|
positive
|
def test_random(self):
context = bpy.context
prop = context.scene.test_prop
prop.type = 'RANDOM'
for s in range(100):
prop.seed = random.randrange(0, 10000)
res = build(context, prop)
self.assertIsNotNone(res)
<DeepExtract>
[bpy.data.objects.remove(o) for o in bpy.data.objects]
</DeepExtract>
prop.random_extension_amount = False
for i in range(1, 5):
prop.extension_amount = i
res = build(context, prop)
self.assertIsNotNone(res)
faces = context.object.data.polygons
self.assertEquals(len(faces), i + 1)
<DeepExtract>
[bpy.data.objects.remove(o) for o in bpy.data.objects]
</DeepExtract>
|
def test_random(self):
context = bpy.context
prop = context.scene.test_prop
prop.type = 'RANDOM'
for s in range(100):
prop.seed = random.randrange(0, 10000)
res = build(context, prop)
self.assertIsNotNone(res)
[bpy.data.objects.remove(o) for o in bpy.data.objects]
prop.random_extension_amount = False
for i in range(1, 5):
prop.extension_amount = i
res = build(context, prop)
self.assertIsNotNone(res)
faces = context.object.data.polygons
self.assertEquals(len(faces), i + 1)
[bpy.data.objects.remove(o) for o in bpy.data.objects]
|
building_tools
|
positive
|
def load_target_image(self, feature_image, input_filenames):
if self.target_index is None:
target_image = copy_image(feature_image)
else:
if self.extract_sub_volumes:
sub_volume_indices = input_filenames[self.target_sub_volumes_index]
else:
sub_volume_indices = None
<DeepExtract>
filename = input_filenames[self.target_index]
image = load_image(filename, force_4d=True, reorder=False, interpolation=self.target_interpolation, dtype=self.dtype, verbose=self.verbose)
if sub_volume_indices:
image = extract_sub_volumes(image, sub_volume_indices)
target_image = image
</DeepExtract>
return target_image
|
def load_target_image(self, feature_image, input_filenames):
if self.target_index is None:
target_image = copy_image(feature_image)
else:
if self.extract_sub_volumes:
sub_volume_indices = input_filenames[self.target_sub_volumes_index]
else:
sub_volume_indices = None
filename = input_filenames[self.target_index]
image = load_image(filename, force_4d=True, reorder=False, interpolation=self.target_interpolation, dtype=self.dtype, verbose=self.verbose)
if sub_volume_indices:
image = extract_sub_volumes(image, sub_volume_indices)
target_image = image
return target_image
|
3DUnetCNN
|
positive
|
def deploy_contracts(self):
tcf_home = os.environ.get('TCF_HOME', '../../')
<DeepExtract>
if not exists(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file']):
raise FileNotFoundError('File not found at path: {0}'.format(realpath(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file'])))
compiled_sol = self.__eth_client.compile_source_file(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file'])
(contract_id, contract_interface) = compiled_sol.popitem()
address = self.__eth_client.deploy_contract(contract_interface)
logging.info('Deployed %s to: %s\n', contract_id, address)
return {'status': 'success'}
</DeepExtract>
<DeepExtract>
if not exists(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file']):
raise FileNotFoundError('File not found at path: {0}'.format(realpath(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file'])))
compiled_sol = self.__eth_client.compile_source_file(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file'])
(contract_id, contract_interface) = compiled_sol.popitem()
address = self.__eth_client.deploy_contract(contract_interface)
logging.info('Deployed %s to: %s\n', contract_id, address)
return {'status': 'success'}
</DeepExtract>
|
def deploy_contracts(self):
tcf_home = os.environ.get('TCF_HOME', '../../')
if not exists(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file']):
raise FileNotFoundError('File not found at path: {0}'.format(realpath(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file'])))
compiled_sol = self.__eth_client.compile_source_file(tcf_home + '/' + self.__config['ethereum']['direct_registry_contract_file'])
(contract_id, contract_interface) = compiled_sol.popitem()
address = self.__eth_client.deploy_contract(contract_interface)
logging.info('Deployed %s to: %s\n', contract_id, address)
return {'status': 'success'}
if not exists(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file']):
raise FileNotFoundError('File not found at path: {0}'.format(realpath(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file'])))
compiled_sol = self.__eth_client.compile_source_file(tcf_home + '/' + self.__config['ethereum']['worker_registry_contract_file'])
(contract_id, contract_interface) = compiled_sol.popitem()
address = self.__eth_client.deploy_contract(contract_interface)
logging.info('Deployed %s to: %s\n', contract_id, address)
return {'status': 'success'}
|
avalon
|
positive
|
def get_name(self, key):
<DeepExtract>
value = dict.get(self, key, marker)
if value is not marker:
value = value
root = getattr(self, '_root', marker)
if root is not marker:
value = dict.get(root, key, marker)
if value is not marker:
value = value
value = marker
</DeepExtract>
if value is marker:
raise NameError(key)
return value
|
def get_name(self, key):
value = dict.get(self, key, marker)
if value is not marker:
value = value
root = getattr(self, '_root', marker)
if root is not marker:
value = dict.get(root, key, marker)
if value is not marker:
value = value
value = marker
if value is marker:
raise NameError(key)
return value
|
chameleon
|
positive
|
def update_info(self, time_limit=None, memory_limit=None, interactive=None):
"""
Updates problem info
:param time_limit: time limit in milliseconds or None if no update required
:param memory_limit: memory limit in MB or None if no update required
:param interactive: boolean, if problem is interactive, or None if no update required
:returns boolean, if updated
"""
<DeepExtract>
if time_limit is not None:
setattr(self.revision, 'time_limit', time_limit)
</DeepExtract>
<DeepExtract>
if memory_limit is not None:
setattr(self.revision, 'memory_limit', memory_limit)
</DeepExtract>
return True
|
def update_info(self, time_limit=None, memory_limit=None, interactive=None):
"""
Updates problem info
:param time_limit: time limit in milliseconds or None if no update required
:param memory_limit: memory limit in MB or None if no update required
:param interactive: boolean, if problem is interactive, or None if no update required
:returns boolean, if updated
"""
if time_limit is not None:
setattr(self.revision, 'time_limit', time_limit)
if memory_limit is not None:
setattr(self.revision, 'memory_limit', memory_limit)
return True
|
eoj3
|
positive
|
def fetch_exchange(zone_key1: str, zone_key2: str, session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger(__name__)) -> Dict[str, Any]:
"""Requests the last known power exchange (in MW) between two regions."""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
if sorted_zone_keys != 'CA-NB->CA-PE':
raise NotImplementedError('This exchange pair is not implemented')
requests_obj = session or Session()
<DeepExtract>
url = 'https://wdf.princeedwardisland.ca/api/workflow'
request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
headers = {'Content-Type': 'application/json'}
response = requests_obj.post(url, data=json.dumps(request), headers=headers)
raw_data = response.json().get('data') or []
datetime_item = [item['data']['text'] for item in raw_data if 'text' in item['data']]
if not datetime_item:
pei_info = None
datetime_text = datetime_item[0][len('Last updated '):]
data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')
data = {'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'), 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'), 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'), 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'), 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'), 'datetime': data_timestamp.datetime}
if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:
pei_info = None
pei_info = data
</DeepExtract>
if pei_info is None or pei_info['pei_load'] is None:
return None
imported_from_nb = pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen']
data = {'datetime': pei_info['datetime'], 'sortedZoneKeys': sorted_zone_keys, 'netFlow': imported_from_nb, 'source': 'princeedwardisland.ca'}
return data
|
def fetch_exchange(zone_key1: str, zone_key2: str, session: Optional[Session]=None, target_datetime: Optional[datetime]=None, logger: Logger=getLogger(__name__)) -> Dict[str, Any]:
"""Requests the last known power exchange (in MW) between two regions."""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
if sorted_zone_keys != 'CA-NB->CA-PE':
raise NotImplementedError('This exchange pair is not implemented')
requests_obj = session or Session()
url = 'https://wdf.princeedwardisland.ca/api/workflow'
request = {'featureName': 'WindEnergy', 'queryName': 'WindEnergy'}
headers = {'Content-Type': 'application/json'}
response = requests_obj.post(url, data=json.dumps(request), headers=headers)
raw_data = response.json().get('data') or []
datetime_item = [item['data']['text'] for item in raw_data if 'text' in item['data']]
if not datetime_item:
pei_info = None
datetime_text = datetime_item[0][len('Last updated '):]
data_timestamp = arrow.get(datetime_text, 'MMMM D, YYYY HH:mm A').replace(tzinfo='Canada/Atlantic')
data = {'pei_load': _find_pei_key(raw_data, 'Total On-Island Load'), 'pei_wind_gen': _find_pei_key(raw_data, 'Total On-Island Wind Generation'), 'pei_fossil_gen': _find_pei_key(raw_data, 'Total On-Island Fossil Fuel Generation'), 'pei_wind_used': _find_pei_key(raw_data, 'Wind Power Used On Island'), 'pei_wind_exported': _find_pei_key(raw_data, 'Wind Power Exported Off Island'), 'datetime': data_timestamp.datetime}
if data['pei_wind_gen'] is None or data['pei_fossil_gen'] is None:
pei_info = None
pei_info = data
if pei_info is None or pei_info['pei_load'] is None:
return None
imported_from_nb = pei_info['pei_load'] - pei_info['pei_fossil_gen'] - pei_info['pei_wind_gen']
data = {'datetime': pei_info['datetime'], 'sortedZoneKeys': sorted_zone_keys, 'netFlow': imported_from_nb, 'source': 'princeedwardisland.ca'}
return data
|
electricitymap-contrib
|
positive
|
def power2(base, exp):
if exp == 0:
return 1
else:
<DeepExtract>
val = 1
for _ in range(exp // 2):
val *= x
partial = val
</DeepExtract>
result = partial * partial
if exp % 2 == 1:
result *= base
return result
|
def power2(base, exp):
if exp == 0:
return 1
else:
val = 1
for _ in range(exp // 2):
val *= x
partial = val
result = partial * partial
if exp % 2 == 1:
result *= base
return result
|
code-catalog-python
|
positive
|
def run_replay_analysis(replay, cluster_endpoint, start_time, end_time, bucket_url, iam_role, user, tag='', workload='', is_serverless=False, secret_name=None, nlb_nat_dns=None, complete=True, stats=None, summary=None):
"""End to end data collection, parsing, analysis and pdf generation
@param replay: str, replay id from replay.py
@param cluster_endpoint: str, target cluster endpoint
@param start_time: datetime object, start time of replay
@param end_time: datetime object, end time of replay
@param bucket_url: str, S3 bucket location
@param iam_role: str, IAM ARN for unload
@param user: str, master username for cluster
@param tag: str, optional identifier
@param is_serverless: bool, serverless or provisioned cluster
@param secret_name: str, name of the secret that stores admin username and password
@param nlb_nat_dns: str, dns endpoint if specified will be used to connect instead of target cluster endpoint
@param complete: bool, complete/incomplete replay run
@param stats: dict, run details
@param summary: str list, replay output summary from replay.py
"""
logger = logging.getLogger('SimpleReplayLogger')
s3_client = boto3.client('s3')
cluster = cluster_dict(cluster_endpoint, is_serverless, start_time, end_time)
cluster['is_serverless'] = is_serverless
cluster['secret_name'] = secret_name
cluster['host'] = nlb_nat_dns if nlb_nat_dns != None else cluster['host']
if type(bucket_url) is str:
bucket = bucket_dict(bucket_url)
logger.debug(bucket)
logger.info(f'Running analysis for replay: {replay}')
replay_path = f"{bucket['prefix']}analysis/{replay}"
<DeepExtract>
logger = logging.getLogger('SimpleReplayLogger')
directory = 'sql/serverless'
queries = []
with initiate_connection(username=user, cluster=cluster) as conn:
cursor = conn.cursor()
logger.info(f"Querying {cluster.get('id')}. This may take some time.")
for file in sorted(os.listdir(directory)):
if not file.endswith('.sql'):
continue
with open(f'{directory}/{file}', 'r') as query_file:
query_name = os.path.splitext(file)[0]
logger.debug(f'Query: {query_name}')
queries.append(query_name)
query = query_file.read()
query = re.sub('{{START_TIME}}', f"'{cluster.get('start_time')}'", query)
query = re.sub('{{END_TIME}}', f"'{cluster.get('end_time')}'", query)
unload_query = f"unload ($${query}$$) to '{bucket.get('url')}/analysis/{replay}/raw_data/{query_name}' iam_role '{iam_role}' CSV header allowoverwrite parallel off;"
try:
cursor.execute(unload_query)
except Exception as e:
logger.error(f'Could not unload {query_name} results. Confirm IAM permissions include UNLOAD access for Redshift. {e}')
exit(-1)
logger.info(f"Query results available in {bucket.get('url')}")
queries = queries
</DeepExtract>
info = create_json(replay, cluster, workload, complete, stats, tag)
try:
boto3.resource('s3').Bucket(bucket.get('bucket_name')).upload_file(info, f'{replay_path}/{info}')
except ClientError as e:
logger.error(f'{e} Could not upload info. Confirm IAM permissions include S3::PutObject.')
if is_serverless:
exit(0)
else:
report = Report(cluster, replay, bucket, replay_path, tag, complete)
try:
for q in queries:
<DeepExtract>
logger = logging.getLogger('SimpleReplayLogger')
s3_client = boto3.client('s3')
try:
response = s3_client.get_object(Bucket=bucket.get('bucket_name'), Key=f'{replay_path}/raw_data/{q}000')
except Exception as e:
logger.error(f'Unable to get raw data from S3. Results for {q} not found. {e}')
df = pd.read_csv(response.get('Body')).fillna(0)
logger.debug(f"Parsing results from '{q}' query.")
if q == 'latency_distribution':
report.feature_graph = df
else:
for (t, vals) in report.tables.items():
if vals.get('sql') == q:
vals['data'] = read_data(t, df, vals.get('columns'), report)
</DeepExtract>
except s3_client.exceptions.NoSuchKey as e:
logger.error(f'{e} Raw data does not exist in S3. Error in replay analysis.')
exit(-1)
except Exception as e:
logger.error(f'{e}: Data read failed. Error in replay analysis.')
exit(-1)
logger.info(f'Generating report.')
pdf = pdf_gen(report, summary)
s3_resource = boto3.resource('s3')
try:
s3_resource.Bucket(bucket.get('bucket_name')).upload_file(pdf, f'{replay_path}/out/{pdf}')
s3_resource.Bucket(bucket.get('bucket_name')).upload_file(info, f'{replay_path}/out/{info}')
<DeepExtract>
logger = logging.getLogger('SimpleReplayLogger')
bucket = bucket_dict(bucket.get('url'))
logger.info(f'Simple Replay Workload Analysis: {replay}')
replay_path = f'analysis/{replay}/out/'
output_str = f'\nBelow is the presigned URLs for the analysis performed for replay: {replay}. Click or copy/paste the link into your browser to download.'
r_url = create_presigned_url(bucket.get('bucket_name'), f'{replay_path}{replay}_report.pdf')
output_str += f'\n\nReplay Analysis Report | Click to Download:\n{r_url}\n'
logger.info(output_str)
</DeepExtract>
except ClientError as e:
logger.error(f'{e} Could not upload report. Confirm IAM permissions include S3::PutObject.')
exit(-1)
|
def run_replay_analysis(replay, cluster_endpoint, start_time, end_time, bucket_url, iam_role, user, tag='', workload='', is_serverless=False, secret_name=None, nlb_nat_dns=None, complete=True, stats=None, summary=None):
"""End to end data collection, parsing, analysis and pdf generation
@param replay: str, replay id from replay.py
@param cluster_endpoint: str, target cluster endpoint
@param start_time: datetime object, start time of replay
@param end_time: datetime object, end time of replay
@param bucket_url: str, S3 bucket location
@param iam_role: str, IAM ARN for unload
@param user: str, master username for cluster
@param tag: str, optional identifier
@param is_serverless: bool, serverless or provisioned cluster
@param secret_name: str, name of the secret that stores admin username and password
@param nlb_nat_dns: str, dns endpoint if specified will be used to connect instead of target cluster endpoint
@param complete: bool, complete/incomplete replay run
@param stats: dict, run details
@param summary: str list, replay output summary from replay.py
"""
logger = logging.getLogger('SimpleReplayLogger')
s3_client = boto3.client('s3')
cluster = cluster_dict(cluster_endpoint, is_serverless, start_time, end_time)
cluster['is_serverless'] = is_serverless
cluster['secret_name'] = secret_name
cluster['host'] = nlb_nat_dns if nlb_nat_dns != None else cluster['host']
if type(bucket_url) is str:
bucket = bucket_dict(bucket_url)
logger.debug(bucket)
logger.info(f'Running analysis for replay: {replay}')
replay_path = f"{bucket['prefix']}analysis/{replay}"
logger = logging.getLogger('SimpleReplayLogger')
directory = 'sql/serverless'
queries = []
with initiate_connection(username=user, cluster=cluster) as conn:
cursor = conn.cursor()
logger.info(f"Querying {cluster.get('id')}. This may take some time.")
for file in sorted(os.listdir(directory)):
if not file.endswith('.sql'):
continue
with open(f'{directory}/{file}', 'r') as query_file:
query_name = os.path.splitext(file)[0]
logger.debug(f'Query: {query_name}')
queries.append(query_name)
query = query_file.read()
query = re.sub('{{START_TIME}}', f"'{cluster.get('start_time')}'", query)
query = re.sub('{{END_TIME}}', f"'{cluster.get('end_time')}'", query)
unload_query = f"unload ($${query}$$) to '{bucket.get('url')}/analysis/{replay}/raw_data/{query_name}' iam_role '{iam_role}' CSV header allowoverwrite parallel off;"
try:
cursor.execute(unload_query)
except Exception as e:
logger.error(f'Could not unload {query_name} results. Confirm IAM permissions include UNLOAD access for Redshift. {e}')
exit(-1)
logger.info(f"Query results available in {bucket.get('url')}")
queries = queries
info = create_json(replay, cluster, workload, complete, stats, tag)
try:
boto3.resource('s3').Bucket(bucket.get('bucket_name')).upload_file(info, f'{replay_path}/{info}')
except ClientError as e:
logger.error(f'{e} Could not upload info. Confirm IAM permissions include S3::PutObject.')
if is_serverless:
exit(0)
else:
report = Report(cluster, replay, bucket, replay_path, tag, complete)
try:
for q in queries:
logger = logging.getLogger('SimpleReplayLogger')
s3_client = boto3.client('s3')
try:
response = s3_client.get_object(Bucket=bucket.get('bucket_name'), Key=f'{replay_path}/raw_data/{q}000')
except Exception as e:
logger.error(f'Unable to get raw data from S3. Results for {q} not found. {e}')
df = pd.read_csv(response.get('Body')).fillna(0)
logger.debug(f"Parsing results from '{q}' query.")
if q == 'latency_distribution':
report.feature_graph = df
else:
for (t, vals) in report.tables.items():
if vals.get('sql') == q:
vals['data'] = read_data(t, df, vals.get('columns'), report)
except s3_client.exceptions.NoSuchKey as e:
logger.error(f'{e} Raw data does not exist in S3. Error in replay analysis.')
exit(-1)
except Exception as e:
logger.error(f'{e}: Data read failed. Error in replay analysis.')
exit(-1)
logger.info(f'Generating report.')
pdf = pdf_gen(report, summary)
s3_resource = boto3.resource('s3')
try:
s3_resource.Bucket(bucket.get('bucket_name')).upload_file(pdf, f'{replay_path}/out/{pdf}')
s3_resource.Bucket(bucket.get('bucket_name')).upload_file(info, f'{replay_path}/out/{info}')
logger = logging.getLogger('SimpleReplayLogger')
bucket = bucket_dict(bucket.get('url'))
logger.info(f'Simple Replay Workload Analysis: {replay}')
replay_path = f'analysis/{replay}/out/'
output_str = f'\nBelow is the presigned URLs for the analysis performed for replay: {replay}. Click or copy/paste the link into your browser to download.'
r_url = create_presigned_url(bucket.get('bucket_name'), f'{replay_path}{replay}_report.pdf')
output_str += f'\n\nReplay Analysis Report | Click to Download:\n{r_url}\n'
logger.info(output_str)
except ClientError as e:
logger.error(f'{e} Could not upload report. Confirm IAM permissions include S3::PutObject.')
exit(-1)
|
amazon-redshift-utils
|
positive
|
def sam2fastq(handle, outlist, last, first, paired):
"""Return tuple of fastq and bool of sam entry.
Bool is true if sam entry is correctly aligned."""
pname = 0
fqs = []
for sam in handle:
sam = sam.strip()
if not sam or sam.startswith('@'):
continue
(qname, flag, tname, pos, mapq, cigar, mrnm, mpos, tlen, seq, qual) = sam.split('\t')[:11]
flag = int(flag)
if flag & 16:
seq = str(Seq.Seq(seq).reverse_complement())
qual = qual[::-1]
if last:
(seq, qual) = (seq[:last], qual[:last])
if first:
(seq, qual) = (seq[first:], qual[first:])
fq = '@%s\n%s\n+\n%s\n' % (qname, seq, qual)
if paired:
if pname == qname:
fqs.append((fq, flag))
<DeepExtract>
for (fq, flag) in fqs:
if flag & 128:
outlist[1].write(fq)
else:
outlist[0].write(fq)
</DeepExtract>
fqs = []
else:
fqs = [(fq, flag)]
pname = qname
else:
outlist[0].write(fq)
|
def sam2fastq(handle, outlist, last, first, paired):
"""Return tuple of fastq and bool of sam entry.
Bool is true if sam entry is correctly aligned."""
pname = 0
fqs = []
for sam in handle:
sam = sam.strip()
if not sam or sam.startswith('@'):
continue
(qname, flag, tname, pos, mapq, cigar, mrnm, mpos, tlen, seq, qual) = sam.split('\t')[:11]
flag = int(flag)
if flag & 16:
seq = str(Seq.Seq(seq).reverse_complement())
qual = qual[::-1]
if last:
(seq, qual) = (seq[:last], qual[:last])
if first:
(seq, qual) = (seq[first:], qual[first:])
fq = '@%s\n%s\n+\n%s\n' % (qname, seq, qual)
if paired:
if pname == qname:
fqs.append((fq, flag))
for (fq, flag) in fqs:
if flag & 128:
outlist[1].write(fq)
else:
outlist[0].write(fq)
fqs = []
else:
fqs = [(fq, flag)]
pname = qname
else:
outlist[0].write(fq)
|
bin
|
positive
|
def parse_path_component(path, elm=None, cnt=None):
"""Parses a single str "@class/instance/attribute" or "Tag" segment, optionally followed by a
"[<begin>-<end>]" and/or "*<count>". Returns <path>,<element>,<count>. Priority for computing
element count is the "[<begin>-<end>]" range, any specified "*<count>", and finally the supplied
'cnt' (default: None).
"""
if '*' in path:
(path, cnt) = path.split('*', 1)
<DeepExtract>
try:
cnt = int(cnt, base=base)
except ValueError:
cnt = int(cnt, base=0)
</DeepExtract>
if '[' in path:
(path, elm) = path.split('[', 1)
(elm, rem) = elm.split(']')
assert not rem, 'Garbage after [...]: %r' % rem
lst = None
if '-' in elm:
(elm, lst) = elm.split('-')
lst = int(lst)
elm = int(elm)
if lst is not None:
cnt = lst + 1 - elm
assert cnt > 0, 'Invalid element range %d-%d' % (elm, lst)
segments = []
if path.startswith('@'):
try:
defaults = ('class', 'instance', 'attribute', 'element')
for (i, seg) in enumerate(path[1:].split('/')):
if seg.startswith('{'):
trm = json.loads(seg)
else:
assert i < len(defaults), 'No default segment type beyond %r' % defaults
trm = {defaults[i]: parse_int(seg)}
segments.append(trm)
except Exception as exc:
raise Exception('Invalid @%s; 1-4 (default decimal) terms, eg. 26, 0x1A, {"connection":100}, 0o46, 0b100110: %s' % ('/'.join(('<%s>' % d for d in defaults)), exc))
else:
segments.append({'symbolic': path})
if elm is not None:
if not segments or 'element' not in segments[-1]:
segments.append({})
segments[-1]['element'] = elm
return (segments, elm, cnt)
|
def parse_path_component(path, elm=None, cnt=None):
"""Parses a single str "@class/instance/attribute" or "Tag" segment, optionally followed by a
"[<begin>-<end>]" and/or "*<count>". Returns <path>,<element>,<count>. Priority for computing
element count is the "[<begin>-<end>]" range, any specified "*<count>", and finally the supplied
'cnt' (default: None).
"""
if '*' in path:
(path, cnt) = path.split('*', 1)
try:
cnt = int(cnt, base=base)
except ValueError:
cnt = int(cnt, base=0)
if '[' in path:
(path, elm) = path.split('[', 1)
(elm, rem) = elm.split(']')
assert not rem, 'Garbage after [...]: %r' % rem
lst = None
if '-' in elm:
(elm, lst) = elm.split('-')
lst = int(lst)
elm = int(elm)
if lst is not None:
cnt = lst + 1 - elm
assert cnt > 0, 'Invalid element range %d-%d' % (elm, lst)
segments = []
if path.startswith('@'):
try:
defaults = ('class', 'instance', 'attribute', 'element')
for (i, seg) in enumerate(path[1:].split('/')):
if seg.startswith('{'):
trm = json.loads(seg)
else:
assert i < len(defaults), 'No default segment type beyond %r' % defaults
trm = {defaults[i]: parse_int(seg)}
segments.append(trm)
except Exception as exc:
raise Exception('Invalid @%s; 1-4 (default decimal) terms, eg. 26, 0x1A, {"connection":100}, 0o46, 0b100110: %s' % ('/'.join(('<%s>' % d for d in defaults)), exc))
else:
segments.append({'symbolic': path})
if elm is not None:
if not segments or 'element' not in segments[-1]:
segments.append({})
segments[-1]['element'] = elm
return (segments, elm, cnt)
|
cpppo
|
positive
|
def __init__(self, input_channel=3, I_r_size=(32, 100), inputDataType='torch.cuda.FloatTensor', offsets=False, norm_type='BN', default_type=6, rand=False):
"""
Args:
input_channel (int): channel of input features,
set it to 1 if the grayscale images and 3 if RGB input
I_r_size (tuple): size of rectified images (used in STN transformations)
inputDataType (str): the type of input data,
only support 'torch.cuda.FloatTensor' this version
offsets (bool): set it to False if use SPN w.o. AIN,
and set it to True if use SPIN (both with SPN and AIN)
norm_type (str): the normalization type of the module,
set it to 'BN' by default, 'IN' optionally
default_type (int): the K chromatic space,
set it to 3/5/6 depend on the complexity of transformation intensities
rand (bool): initialize type, set it to False by default
"""
super(GA_SPIN_Transformer, self).__init__()
self.nc = input_channel
self.inputDataType = inputDataType
self.spt = True
self.offsets = offsets
self.stn = True
self.I_r_size = I_r_size
if norm_type == 'BN':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'IN':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
if self.spt:
self.sp_net = SP_TransformerNetwork(input_channel, default_type)
self.spt_convnet = nn.Sequential(nn.Conv2d(input_channel, 32, 3, 1, 1, bias=False), norm_layer(32), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(32, 64, 3, 1, 1, bias=False), norm_layer(64), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(64, 128, 3, 1, 1, bias=False), norm_layer(128), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2))
self.stucture_fc1 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1, bias=False), norm_layer(256), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(256, 256, 3, 1, 1, bias=False), norm_layer(256), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(256, 512, 3, 1, 1, bias=False), norm_layer(512), nn.ReLU(True), nn.AdaptiveAvgPool2d(1), nn.Flatten(1, -1), nn.Linear(512, 256), nn.BatchNorm1d(256), nn.ReLU(True))
self.out_weight = 2 * default_type + 1
self.spt_length = 2 * default_type + 1
if offsets:
self.out_weight += 1
if self.stn:
self.F = 20
self.GridGenerator = GridGenerator(self.F, (self.I_r_size[0], self.I_r_size[1]))
self.out_weight += self.F * 2
self.stucture_fc2 = nn.Linear(256, self.out_weight)
self.sigmoid = nn.Sigmoid()
if offsets:
self.offset_fc1 = nn.Sequential(nn.Conv2d(128, 16, 3, 1, 1, bias=False), norm_layer(16), nn.ReLU(True))
self.offset_fc2 = nn.Conv2d(16, input_channel, 3, 1, 1)
self.pool = nn.MaxPool2d(2, 2)
<DeepExtract>
for m in self.offset_fc1.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
</DeepExtract>
<DeepExtract>
for m in self.offset_fc2.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
</DeepExtract>
<DeepExtract>
for m in self.sp_net.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
</DeepExtract>
<DeepExtract>
for m in self.spt_convnet.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
</DeepExtract>
<DeepExtract>
for m in self.stucture_fc1.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
</DeepExtract>
if rand:
nn.init.kaiming_normal_(self.stucture_fc2.weight.data, nonlinearity='relu')
self.stucture_fc2.bias.data.fill_(0)
else:
<DeepExtract>
init_id = [0.0] * default_type * 2 + [5.0]
if self.offsets:
init_id += [-5.0]
init = np.array(init_id)
self.stucture_fc2.weight.data.fill_(0)
if self.stn:
F = self.F
ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
initial_bias = initial_bias.reshape(-1)
init = np.concatenate([init, initial_bias], axis=0)
self.stucture_fc2.bias.data = torch.from_numpy(init).float().view(-1)
</DeepExtract>
|
def __init__(self, input_channel=3, I_r_size=(32, 100), inputDataType='torch.cuda.FloatTensor', offsets=False, norm_type='BN', default_type=6, rand=False):
"""
Args:
input_channel (int): channel of input features,
set it to 1 if the grayscale images and 3 if RGB input
I_r_size (tuple): size of rectified images (used in STN transformations)
inputDataType (str): the type of input data,
only support 'torch.cuda.FloatTensor' this version
offsets (bool): set it to False if use SPN w.o. AIN,
and set it to True if use SPIN (both with SPN and AIN)
norm_type (str): the normalization type of the module,
set it to 'BN' by default, 'IN' optionally
default_type (int): the K chromatic space,
set it to 3/5/6 depend on the complexity of transformation intensities
rand (bool): initialize type, set it to False by default
"""
super(GA_SPIN_Transformer, self).__init__()
self.nc = input_channel
self.inputDataType = inputDataType
self.spt = True
self.offsets = offsets
self.stn = True
self.I_r_size = I_r_size
if norm_type == 'BN':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'IN':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
if self.spt:
self.sp_net = SP_TransformerNetwork(input_channel, default_type)
self.spt_convnet = nn.Sequential(nn.Conv2d(input_channel, 32, 3, 1, 1, bias=False), norm_layer(32), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(32, 64, 3, 1, 1, bias=False), norm_layer(64), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(64, 128, 3, 1, 1, bias=False), norm_layer(128), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2))
self.stucture_fc1 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1, bias=False), norm_layer(256), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(256, 256, 3, 1, 1, bias=False), norm_layer(256), nn.ReLU(True), nn.MaxPool2d(kernel_size=2, stride=2), nn.Conv2d(256, 512, 3, 1, 1, bias=False), norm_layer(512), nn.ReLU(True), nn.AdaptiveAvgPool2d(1), nn.Flatten(1, -1), nn.Linear(512, 256), nn.BatchNorm1d(256), nn.ReLU(True))
self.out_weight = 2 * default_type + 1
self.spt_length = 2 * default_type + 1
if offsets:
self.out_weight += 1
if self.stn:
self.F = 20
self.GridGenerator = GridGenerator(self.F, (self.I_r_size[0], self.I_r_size[1]))
self.out_weight += self.F * 2
self.stucture_fc2 = nn.Linear(256, self.out_weight)
self.sigmoid = nn.Sigmoid()
if offsets:
self.offset_fc1 = nn.Sequential(nn.Conv2d(128, 16, 3, 1, 1, bias=False), norm_layer(16), nn.ReLU(True))
self.offset_fc2 = nn.Conv2d(16, input_channel, 3, 1, 1)
self.pool = nn.MaxPool2d(2, 2)
for m in self.offset_fc1.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
for m in self.offset_fc2.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
for m in self.sp_net.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
for m in self.spt_convnet.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
for m in self.stucture_fc1.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.001)
m.bias.data.zero_()
if rand:
nn.init.kaiming_normal_(self.stucture_fc2.weight.data, nonlinearity='relu')
self.stucture_fc2.bias.data.fill_(0)
else:
init_id = [0.0] * default_type * 2 + [5.0]
if self.offsets:
init_id += [-5.0]
init = np.array(init_id)
self.stucture_fc2.weight.data.fill_(0)
if self.stn:
F = self.F
ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
initial_bias = initial_bias.reshape(-1)
init = np.concatenate([init, initial_bias], axis=0)
self.stucture_fc2.bias.data = torch.from_numpy(init).float().view(-1)
|
DAVAR-Lab-OCR
|
positive
|
def to_dataframe(self, channels: ChannelsType | None=None, raster: RasterType | None=None, time_from_zero: bool=True, empty_channels: EmptyChannelsType='skip', keep_arrays: bool=False, use_display_names: bool=False, time_as_date: bool=False, reduce_memory_usage: bool=False, raw: bool=False, ignore_value2text_conversions: bool=False, use_interpolation: bool=True, only_basenames: bool=False, interpolate_outwards_with_nan: bool=False, numeric_1D_only: bool=False, progress=None) -> pd.DataFrame:
"""generate pandas DataFrame
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name who's timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can reprezent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
use_interpolation (True) : bool
option to perform interpoaltions when multiple timestamp raster are
present. If *False* then dataframe columns will be automatically
filled with NaN's were the dataframe index values are not found in
the current column's timestamps
.. versionadded:: 5.11.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
.. versionadded:: 5.15.0
Returns
-------
dataframe : pandas.DataFrame
"""
if channels is not None:
<DeepExtract>
if version is None:
version = self.version
else:
version = validate_version_argument(version)
gps = self.included_channels(channels=channels)
mdf = MDF(version=version, **self._kwargs)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
self.configure(copy_on_get=False)
if self.name:
origin = self.name.name
else:
origin = 'New MDF'
groups_nr = len(gps)
if progress is not None:
if callable(progress):
progress(0, groups_nr)
else:
progress.signals.setValue.emit(0)
progress.signals.setMaximum.emit(groups_nr)
if progress.stop:
mdf = TERMINATED
for (i, (group_index, groups)) in enumerate(gps.items()):
for (idx, sigs) in enumerate(self._yield_selected_signals(group_index, groups=groups, version=version)):
if not sigs:
break
if idx == 0:
if sigs:
cg = self.groups[group_index].channel_group
cg_nr = mdf.append(sigs, common_timebase=True, comment=cg.comment)
MDF._transfer_channel_group_data(mdf.groups[cg_nr].channel_group, cg)
else:
break
else:
mdf.extend(cg_nr, sigs)
if progress is not None:
if callable(progress):
progress(i + 1, groups_nr)
else:
progress.signals.setValue.emit(i + 1)
if progress.stop:
mdf = TERMINATED
self.configure(copy_on_get=True)
mdf._transfer_metadata(self, message=f'Filtered from {self.name}')
mdf = mdf
</DeepExtract>
result = mdf.to_dataframe(raster=raster, time_from_zero=time_from_zero, empty_channels=empty_channels, keep_arrays=keep_arrays, use_display_names=use_display_names, time_as_date=time_as_date, reduce_memory_usage=reduce_memory_usage, raw=raw, ignore_value2text_conversions=ignore_value2text_conversions, use_interpolation=use_interpolation, only_basenames=only_basenames, interpolate_outwards_with_nan=interpolate_outwards_with_nan, numeric_1D_only=numeric_1D_only)
mdf.close()
return result
target_byte_order = '<=' if sys.byteorder == 'little' else '>='
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype='<f4')
del masters
idx = np.argwhere(np.diff(master, prepend=-np.inf) > 0).flatten()
master = master[idx]
used_names = UniqueDB()
used_names.get_unique_name('timestamps')
groups_nr = len(self.virtual_groups)
if progress is not None:
if callable(progress):
progress(0, groups_nr)
else:
progress.signals.setValue.emit(0)
progress.signals.setMaximum.emit(groups_nr)
if progress.stop:
return TERMINATED
for (group_index, (virtual_group_index, virtual_group)) in enumerate(self.virtual_groups.items()):
if virtual_group.cycles_nr == 0 and empty_channels == 'skip':
continue
channels = [(None, gp_index, ch_index) for (gp_index, channel_indexes) in self.included_channels(virtual_group_index)[virtual_group_index].items() for ch_index in channel_indexes if ch_index != self.masters_db.get(gp_index, None)]
signals = [signal for signal in self.select(channels, raw=True, copy_master=False, validate=False)]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == 'zeros':
sig.samples = np.zeros(len(master) if virtual_group.cycles_nr == 0 else virtual_group.cycles_nr, dtype=sig.samples.dtype)
sig.timestamps = master if virtual_group.cycles_nr == 0 else group_master
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in 'US':
signal.samples = samples
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(signal.samples)
for (s_index, sig) in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == 'zeros':
sig.samples = np.zeros(len(master) if virtual_group.cycles_nr == 0 else virtual_group.cycles_nr, dtype=sig.samples.dtype)
sig.timestamps = master if virtual_group.cycles_nr == 0 else group_master
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere((master >= group_master[0]) & (master <= group_master[-1])).flatten()
cycles = len(group_master)
signals = [signal.interp(master, integer_interpolation_mode=self._integer_interpolation, float_interpolation_mode=self._float_interpolation) if not same_master or len(signal) != cycles else signal for signal in signals]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
if any((len(sig) for sig in signals)):
signals = [sig for sig in signals if len(sig)]
if group_master.dtype.byteorder not in target_byte_order:
group_master = group_master.byteswap().newbyteorder()
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
else:
index = pd.Index(group_master, tupleize_cols=False)
size = len(index)
for (k, sig) in enumerate(signals):
if sig.timestamps.dtype.byteorder not in target_byte_order:
sig.timestamps = sig.timestamps.byteswap().newbyteorder()
sig_index = index if len(sig) == size else pd.Index(sig.timestamps, tupleize_cols=False)
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = list(sig.display_names)[0] if sig.display_names else sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if sig.samples.dtype.byteorder not in target_byte_order:
sig.samples = sig.samples.byteswap().newbyteorder()
df[channel_name] = pd.Series(list(sig.samples), index=sig_index)
elif sig.samples.dtype.names:
for (name, series) in components(sig.samples, sig.name, used_names, master=sig_index, only_basenames=only_basenames):
df[name] = series
else:
if use_display_names:
channel_name = list(sig.display_names)[0] if sig.display_names else sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind not in 'SU':
sig.samples = downcast(sig.samples)
if sig.samples.dtype.byteorder not in target_byte_order:
sig.samples = sig.samples.byteswap().newbyteorder()
df[channel_name] = pd.Series(sig.samples, index=sig_index, fastpath=True)
if progress is not None:
if callable(progress):
progress(group_index + 1, groups_nr)
else:
progress.signals.setValue.emit(group_index + 1)
if progress.stop:
return TERMINATED
(strings, nonstrings) = ({}, {})
for (col, series) in df.items():
if series.dtype.kind == 'S':
strings[col] = series
else:
nonstrings[col] = series
if numeric_1D_only:
nonstrings = {col: series for (col, series) in nonstrings.items() if series.dtype.kind in 'uif'}
strings = {}
df = pd.DataFrame(nonstrings, index=master)
if strings:
df_strings = pd.DataFrame(strings, index=master)
df = pd.concat([df, df_strings], axis=1)
df.index.name = 'timestamps'
if time_as_date:
delta = pd.to_timedelta(df.index, unit='s')
new_index = self.header.start_time + delta
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
return df
|
def to_dataframe(self, channels: ChannelsType | None=None, raster: RasterType | None=None, time_from_zero: bool=True, empty_channels: EmptyChannelsType='skip', keep_arrays: bool=False, use_display_names: bool=False, time_as_date: bool=False, reduce_memory_usage: bool=False, raw: bool=False, ignore_value2text_conversions: bool=False, use_interpolation: bool=True, only_basenames: bool=False, interpolate_outwards_with_nan: bool=False, numeric_1D_only: bool=False, progress=None) -> pd.DataFrame:
"""generate pandas DataFrame
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name who's timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can reprezent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
use_interpolation (True) : bool
        option to perform interpolations when multiple timestamp rasters are
        present. If *False* then dataframe columns will be automatically
        filled with NaN's where the dataframe index values are not found in
        the current column's timestamps
.. versionadded:: 5.11.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
.. versionadded:: 5.15.0
Returns
-------
dataframe : pandas.DataFrame
"""
if channels is not None:
if version is None:
version = self.version
else:
version = validate_version_argument(version)
gps = self.included_channels(channels=channels)
mdf = MDF(version=version, **self._kwargs)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
self.configure(copy_on_get=False)
if self.name:
origin = self.name.name
else:
origin = 'New MDF'
groups_nr = len(gps)
if progress is not None:
if callable(progress):
progress(0, groups_nr)
else:
progress.signals.setValue.emit(0)
progress.signals.setMaximum.emit(groups_nr)
if progress.stop:
mdf = TERMINATED
for (i, (group_index, groups)) in enumerate(gps.items()):
for (idx, sigs) in enumerate(self._yield_selected_signals(group_index, groups=groups, version=version)):
if not sigs:
break
if idx == 0:
if sigs:
cg = self.groups[group_index].channel_group
cg_nr = mdf.append(sigs, common_timebase=True, comment=cg.comment)
MDF._transfer_channel_group_data(mdf.groups[cg_nr].channel_group, cg)
else:
break
else:
mdf.extend(cg_nr, sigs)
if progress is not None:
if callable(progress):
progress(i + 1, groups_nr)
else:
progress.signals.setValue.emit(i + 1)
if progress.stop:
mdf = TERMINATED
self.configure(copy_on_get=True)
mdf._transfer_metadata(self, message=f'Filtered from {self.name}')
mdf = mdf
result = mdf.to_dataframe(raster=raster, time_from_zero=time_from_zero, empty_channels=empty_channels, keep_arrays=keep_arrays, use_display_names=use_display_names, time_as_date=time_as_date, reduce_memory_usage=reduce_memory_usage, raw=raw, ignore_value2text_conversions=ignore_value2text_conversions, use_interpolation=use_interpolation, only_basenames=only_basenames, interpolate_outwards_with_nan=interpolate_outwards_with_nan, numeric_1D_only=numeric_1D_only)
mdf.close()
return result
target_byte_order = '<=' if sys.byteorder == 'little' else '>='
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype='<f4')
del masters
idx = np.argwhere(np.diff(master, prepend=-np.inf) > 0).flatten()
master = master[idx]
used_names = UniqueDB()
used_names.get_unique_name('timestamps')
groups_nr = len(self.virtual_groups)
if progress is not None:
if callable(progress):
progress(0, groups_nr)
else:
progress.signals.setValue.emit(0)
progress.signals.setMaximum.emit(groups_nr)
if progress.stop:
return TERMINATED
for (group_index, (virtual_group_index, virtual_group)) in enumerate(self.virtual_groups.items()):
if virtual_group.cycles_nr == 0 and empty_channels == 'skip':
continue
channels = [(None, gp_index, ch_index) for (gp_index, channel_indexes) in self.included_channels(virtual_group_index)[virtual_group_index].items() for ch_index in channel_indexes if ch_index != self.masters_db.get(gp_index, None)]
signals = [signal for signal in self.select(channels, raw=True, copy_master=False, validate=False)]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == 'zeros':
sig.samples = np.zeros(len(master) if virtual_group.cycles_nr == 0 else virtual_group.cycles_nr, dtype=sig.samples.dtype)
sig.timestamps = master if virtual_group.cycles_nr == 0 else group_master
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in 'US':
signal.samples = samples
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(signal.samples)
for (s_index, sig) in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == 'zeros':
sig.samples = np.zeros(len(master) if virtual_group.cycles_nr == 0 else virtual_group.cycles_nr, dtype=sig.samples.dtype)
sig.timestamps = master if virtual_group.cycles_nr == 0 else group_master
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere((master >= group_master[0]) & (master <= group_master[-1])).flatten()
cycles = len(group_master)
signals = [signal.interp(master, integer_interpolation_mode=self._integer_interpolation, float_interpolation_mode=self._float_interpolation) if not same_master or len(signal) != cycles else signal for signal in signals]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
if any((len(sig) for sig in signals)):
signals = [sig for sig in signals if len(sig)]
if group_master.dtype.byteorder not in target_byte_order:
group_master = group_master.byteswap().newbyteorder()
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
else:
index = pd.Index(group_master, tupleize_cols=False)
size = len(index)
for (k, sig) in enumerate(signals):
if sig.timestamps.dtype.byteorder not in target_byte_order:
sig.timestamps = sig.timestamps.byteswap().newbyteorder()
sig_index = index if len(sig) == size else pd.Index(sig.timestamps, tupleize_cols=False)
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = list(sig.display_names)[0] if sig.display_names else sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if sig.samples.dtype.byteorder not in target_byte_order:
sig.samples = sig.samples.byteswap().newbyteorder()
df[channel_name] = pd.Series(list(sig.samples), index=sig_index)
elif sig.samples.dtype.names:
for (name, series) in components(sig.samples, sig.name, used_names, master=sig_index, only_basenames=only_basenames):
df[name] = series
else:
if use_display_names:
channel_name = list(sig.display_names)[0] if sig.display_names else sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind not in 'SU':
sig.samples = downcast(sig.samples)
if sig.samples.dtype.byteorder not in target_byte_order:
sig.samples = sig.samples.byteswap().newbyteorder()
df[channel_name] = pd.Series(sig.samples, index=sig_index, fastpath=True)
if progress is not None:
if callable(progress):
progress(group_index + 1, groups_nr)
else:
progress.signals.setValue.emit(group_index + 1)
if progress.stop:
return TERMINATED
(strings, nonstrings) = ({}, {})
for (col, series) in df.items():
if series.dtype.kind == 'S':
strings[col] = series
else:
nonstrings[col] = series
if numeric_1D_only:
nonstrings = {col: series for (col, series) in nonstrings.items() if series.dtype.kind in 'uif'}
strings = {}
df = pd.DataFrame(nonstrings, index=master)
if strings:
df_strings = pd.DataFrame(strings, index=master)
df = pd.concat([df, df_strings], axis=1)
df.index.name = 'timestamps'
if time_as_date:
delta = pd.to_timedelta(df.index, unit='s')
new_index = self.header.start_time + delta
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
return df
|
asammdf
|
positive
|
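A minimal usage sketch for the to_dataframe API documented in the asammdf pair above; the file name and keyword values are assumptions, only the parameter names come from the docstring.
# Hedged usage sketch: "recording.mf4" is an invented path, not from the dataset.
from asammdf import MDF

mdf = MDF("recording.mf4")
# interpolate every channel onto a common 10 ms raster and start the time axis at zero
df = mdf.to_dataframe(raster=0.01, time_from_zero=True, reduce_memory_usage=True)
print(df.head())
mdf.close()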
def GAN(fake_out, real_out):
log_d1 = -T.nnet.softplus(-fake_out)
log_d0 = -fake_out - T.nnet.softplus(-fake_out)
log_w = log_d1 - log_d0
log_N = T.log(log_w.shape[0]).astype(log_w.dtype)
<DeepExtract>
x_max = T.max(log_w - log_N, axis=0, keepdims=True)
y = T.log(T.sum(T.exp(log_w - log_N - x_max), axis=0, keepdims=True)) + x_max
y = T.sum(y, axis=0)
log_Z_est = y
</DeepExtract>
log_Z_est = theano.gradient.disconnected_grad(log_Z_est)
generator_loss = T.nnet.softplus(-fake_out).mean()
discriminator_loss = T.nnet.softplus(-real_out).mean() + (T.nnet.softplus(-fake_out) + fake_out).mean()
return (generator_loss, discriminator_loss, log_Z_est)
|
def GAN(fake_out, real_out):
log_d1 = -T.nnet.softplus(-fake_out)
log_d0 = -fake_out - T.nnet.softplus(-fake_out)
log_w = log_d1 - log_d0
log_N = T.log(log_w.shape[0]).astype(log_w.dtype)
x_max = T.max(log_w - log_N, axis=0, keepdims=True)
y = T.log(T.sum(T.exp(log_w - log_N - x_max), axis=0, keepdims=True)) + x_max
y = T.sum(y, axis=0)
log_Z_est = y
log_Z_est = theano.gradient.disconnected_grad(log_Z_est)
generator_loss = T.nnet.softplus(-fake_out).mean()
discriminator_loss = T.nnet.softplus(-real_out).mean() + (T.nnet.softplus(-fake_out) + fake_out).mean()
return (generator_loss, discriminator_loss, log_Z_est)
|
BGAN
|
positive
|
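The BGAN pair above leans on the numerically stable log-sum-exp reduction (shift by the maximum before exponentiating). A small NumPy illustration of the same trick, separate from the Theano code:
# Illustrative only: NumPy stand-in for the log_sum_exp pattern seen above.
import numpy as np

def log_sum_exp(log_w, axis=0):
    x_max = np.max(log_w, axis=axis, keepdims=True)
    return np.log(np.sum(np.exp(log_w - x_max), axis=axis)) + np.squeeze(x_max, axis=axis)

log_w = np.array([1000.0, 1001.0, 999.0])
print(log_sum_exp(log_w))            # ~1001.41, stays finite
print(np.log(np.exp(log_w).sum()))   # overflows to inf without the max shift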
def make_fea_sim_mat(self):
"""Make feature similarity matrix.
Note that the first column is the user/item ID.
Returns:
normalized_adj_single
"""
<DeepExtract>
print('Load user and item features from datasets')
(user_feat, item_feat) = load_user_item_feature(self.config)
user_feat_li = [None for i in range(self.n_users)]
item_feat_li = [None for i in range(self.n_items)]
for user in user_feat:
if user[0] in self.user2id:
user_feat_li[self.user2id[user[0]]] = user[1:]
for item in item_feat:
if item[0] in self.item2id:
item_feat_li[self.item2id[item[0]]] = item[1:]
self.user_feat = np.stack(user_feat_li)
self.item_feat = np.stack(item_feat_li)
</DeepExtract>
<DeepExtract>
self.n_train = 0
self.train_items = {}
self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
user_np = np.array(self.train[DEFAULT_USER_COL])
item_np = np.array(self.train[DEFAULT_ITEM_COL])
for u in range(self.n_users):
index = list(np.where(user_np == u)[0])
i = item_np[index]
self.train_items[u] = i
for item in i:
self.R[u, item] = 1
self.n_train += 1
</DeepExtract>
user_sim_mat = sp.csr_matrix(calc_sim(self.user_feat))
item_sim_mat = sp.csr_matrix(calc_sim(self.item_feat))
return (normalized_adj_single(user_sim_mat + sp.eye(user_sim_mat.shape[0])), normalized_adj_single(item_sim_mat + sp.eye(item_sim_mat.shape[0])))
|
def make_fea_sim_mat(self):
"""Make feature similarity matrix.
Note that the first column is the user/item ID.
Returns:
normalized_adj_single
"""
print('Load user and item features from datasets')
(user_feat, item_feat) = load_user_item_feature(self.config)
user_feat_li = [None for i in range(self.n_users)]
item_feat_li = [None for i in range(self.n_items)]
for user in user_feat:
if user[0] in self.user2id:
user_feat_li[self.user2id[user[0]]] = user[1:]
for item in item_feat:
if item[0] in self.item2id:
item_feat_li[self.item2id[item[0]]] = item[1:]
self.user_feat = np.stack(user_feat_li)
self.item_feat = np.stack(item_feat_li)
self.n_train = 0
self.train_items = {}
self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
user_np = np.array(self.train[DEFAULT_USER_COL])
item_np = np.array(self.train[DEFAULT_ITEM_COL])
for u in range(self.n_users):
index = list(np.where(user_np == u)[0])
i = item_np[index]
self.train_items[u] = i
for item in i:
self.R[u, item] = 1
self.n_train += 1
user_sim_mat = sp.csr_matrix(calc_sim(self.user_feat))
item_sim_mat = sp.csr_matrix(calc_sim(self.item_feat))
return (normalized_adj_single(user_sim_mat + sp.eye(user_sim_mat.shape[0])), normalized_adj_single(item_sim_mat + sp.eye(item_sim_mat.shape[0])))
|
beta-recsys
|
positive
|
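The beta-recsys pair above fills a scipy dok_matrix with user-item interactions before normalising it. A toy sketch of that construction pattern, with invented dimensions and interactions:
# Toy data: 4 users, 5 items, a handful of invented (user, item) interactions.
import numpy as np
import scipy.sparse as sp

n_users, n_items = 4, 5
interactions = [(0, 1), (0, 3), (2, 4), (3, 0)]

R = sp.dok_matrix((n_users, n_items), dtype=np.float32)
for u, i in interactions:
    R[u, i] = 1.0
print(R.tocsr().toarray())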
def norm_exp(log_factor):
"""Gets normalized weights.
"""
log_factor = log_factor - T.log(log_factor.shape[0]).astype(floatX)
<DeepExtract>
x_max = T.max(log_factor, axis=0, keepdims=True)
y = T.log(T.sum(T.exp(log_factor - x_max), axis=0, keepdims=True)) + x_max
y = T.sum(y, axis=0)
w_norm = y
</DeepExtract>
log_w = log_factor - T.shape_padleft(w_norm)
w_tilde = T.exp(log_w)
return w_tilde
|
def norm_exp(log_factor):
"""Gets normalized weights.
"""
log_factor = log_factor - T.log(log_factor.shape[0]).astype(floatX)
x_max = T.max(log_factor, axis=0, keepdims=True)
y = T.log(T.sum(T.exp(log_factor - x_max), axis=0, keepdims=True)) + x_max
y = T.sum(y, axis=0)
w_norm = y
log_w = log_factor - T.shape_padleft(w_norm)
w_tilde = T.exp(log_w)
return w_tilde
|
BGAN
|
positive
|
def evaluate_langatt(self, data_loader: DataLoader, language_weight, out_path: Path=None, embeddings_storage_mode: str='cpu') -> (Result, float):
with torch.no_grad():
eval_loss = 0
batch_no: int = 0
metric = Metric('Evaluation')
lines: List[str] = []
for batch in data_loader:
batch_no += 1
with torch.no_grad():
sent_lang_id = torch.cuda.LongTensor([sentence.lang_id for sentence in batch])
teacher_attention = torch.index_select(language_weight, 0, sent_lang_id)
teacher_attention = F.softmax(teacher_attention, -1)
teacher_features = torch.stack([sentence.get_teacher_prediction(pooling='weighted', weight=teacher_attention[idx]) for (idx, sentence) in enumerate(batch)], 0)
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
longest_token_sequence_in_batch: int = max(lengths)
mask = self.sequence_mask(torch.tensor(lengths), longest_token_sequence_in_batch).cuda().type_as(teacher_features)
if self.distill_prob:
features = (teacher_features + 1e-100 * (1 - mask.unsqueeze(-1))).log() * mask.unsqueeze(-1)
else:
features = teacher_features
<DeepExtract>
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
tag_list: List = []
for (s_id, sentence) in enumerate(batch):
tag_idx: List[int] = [self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value) for token in sentence]
tag = torch.tensor(tag_idx, device=flair.device)
tag_list.append(tag)
if self.use_crf:
(tags, _) = pad_tensors(tag_list)
forward_score = self._forward_alg(features, lengths)
gold_score = self._score_sentence(features, tags, lengths)
score = forward_score - gold_score
loss = score.mean()
elif self.sentence_level_loss:
score = 0
for (sentence_feats, sentence_tags, sentence_length) in zip(features, tag_list, lengths):
sentence_feats = sentence_feats[:sentence_length]
score += torch.nn.functional.cross_entropy(sentence_feats, sentence_tags, reduction='sum')
score /= len(features)
loss = score
else:
score = 0
for (sentence_feats, sentence_tags, sentence_length) in zip(features, tag_list, lengths):
sentence_feats = sentence_feats[:sentence_length]
score += torch.nn.functional.cross_entropy(sentence_feats, sentence_tags)
score /= len(features)
loss = score
</DeepExtract>
<DeepExtract>
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
tags = []
all_tags = []
if not self.use_crf:
distribution = F.softmax(features, dim=-1)
(_, indices) = torch.max(features, -1)
sentrange = torch.arange(0, distribution.shape[1]).long().cuda()
if self.predict_posterior:
lengths1 = torch.Tensor(lengths).long()
mask = self.mask
forward_var = self._forward_alg(features, lengths1, distill_mode=True)
backward_var = self._backward_alg(features, lengths1)
forward_backward_score = (forward_var + backward_var) * mask.unsqueeze(-1)
distribution = F.softmax(forward_backward_score, dim=-1)
(_, indices) = torch.max(forward_backward_score, -1)
sentrange = torch.arange(0, distribution.shape[1]).long().cuda()
for (i, vals) in enumerate(zip(features, lengths)):
(feats, length) = vals
if self.use_crf and (not self.predict_posterior):
(confidences, tag_seq, scores) = self._viterbi_decode(feats[:length], all_scores=get_all_tags, current_idx=i)
else:
tag_seq = []
confidences = []
scores = []
tag_seq = indices[i][:length].tolist()
confidences = distribution[i][sentrange, indices[i]][:length].tolist()
scores = distribution[i][:length].tolist()
tags.append([Label(self.tag_dictionary.get_item_for_index(tag), conf) for (conf, tag) in zip(confidences, tag_seq)])
if get_all_tags:
all_tags.append([[Label(self.tag_dictionary.get_item_for_index(score_id), score) for (score_id, score) in enumerate(score_dist)] for score_dist in scores])
(tags, _) = (tags, all_tags)
</DeepExtract>
eval_loss += loss
for (sentence, sent_tags) in zip(batch, tags):
for (token, tag) in zip(sentence.tokens, sent_tags):
token: Token = token
token.add_tag_label('predicted', tag)
eval_line = '{} {} {} {}\n'.format(token.text, token.get_tag(self.tag_type).value, tag.value, tag.score)
lines.append(eval_line)
lines.append('\n')
for sentence in batch:
gold_tags = [(tag.tag, str(tag)) for tag in sentence.get_spans(self.tag_type)]
predicted_tags = [(tag.tag, str(tag)) for tag in sentence.get_spans('predicted')]
for (tag, prediction) in predicted_tags:
if (tag, prediction) in gold_tags:
metric.add_tp(tag)
else:
metric.add_fp(tag)
for (tag, gold) in gold_tags:
if (tag, gold) not in predicted_tags:
metric.add_fn(tag)
else:
metric.add_tn(tag)
store_embeddings(batch, embeddings_storage_mode)
eval_loss /= batch_no
if out_path is not None:
with open(out_path, 'w', encoding='utf-8') as outfile:
outfile.write(''.join(lines))
detailed_result = f'\nMICRO_AVG: acc {metric.micro_avg_accuracy()} - f1-score {metric.micro_avg_f_score()}\nMACRO_AVG: acc {metric.macro_avg_accuracy()} - f1-score {metric.macro_avg_f_score()}'
for class_name in metric.get_classes():
detailed_result += f'\n{class_name:<10} tp: {metric.get_tp(class_name)} - fp: {metric.get_fp(class_name)} - fn: {metric.get_fn(class_name)} - tn: {metric.get_tn(class_name)} - precision: {metric.precision(class_name):.4f} - recall: {metric.recall(class_name):.4f} - accuracy: {metric.accuracy(class_name):.4f} - f1-score: {metric.f_score(class_name):.4f}'
result = Result(main_score=metric.micro_avg_f_score(), log_line=f'{metric.precision()}\t{metric.recall()}\t{metric.micro_avg_f_score()}', log_header='PRECISION\tRECALL\tF1', detailed_results=detailed_result)
return (result, eval_loss)
|
def evaluate_langatt(self, data_loader: DataLoader, language_weight, out_path: Path=None, embeddings_storage_mode: str='cpu') -> (Result, float):
with torch.no_grad():
eval_loss = 0
batch_no: int = 0
metric = Metric('Evaluation')
lines: List[str] = []
for batch in data_loader:
batch_no += 1
with torch.no_grad():
sent_lang_id = torch.cuda.LongTensor([sentence.lang_id for sentence in batch])
teacher_attention = torch.index_select(language_weight, 0, sent_lang_id)
teacher_attention = F.softmax(teacher_attention, -1)
teacher_features = torch.stack([sentence.get_teacher_prediction(pooling='weighted', weight=teacher_attention[idx]) for (idx, sentence) in enumerate(batch)], 0)
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
longest_token_sequence_in_batch: int = max(lengths)
mask = self.sequence_mask(torch.tensor(lengths), longest_token_sequence_in_batch).cuda().type_as(teacher_features)
if self.distill_prob:
features = (teacher_features + 1e-100 * (1 - mask.unsqueeze(-1))).log() * mask.unsqueeze(-1)
else:
features = teacher_features
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
tag_list: List = []
for (s_id, sentence) in enumerate(batch):
tag_idx: List[int] = [self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value) for token in sentence]
tag = torch.tensor(tag_idx, device=flair.device)
tag_list.append(tag)
if self.use_crf:
(tags, _) = pad_tensors(tag_list)
forward_score = self._forward_alg(features, lengths)
gold_score = self._score_sentence(features, tags, lengths)
score = forward_score - gold_score
loss = score.mean()
elif self.sentence_level_loss:
score = 0
for (sentence_feats, sentence_tags, sentence_length) in zip(features, tag_list, lengths):
sentence_feats = sentence_feats[:sentence_length]
score += torch.nn.functional.cross_entropy(sentence_feats, sentence_tags, reduction='sum')
score /= len(features)
loss = score
else:
score = 0
for (sentence_feats, sentence_tags, sentence_length) in zip(features, tag_list, lengths):
sentence_feats = sentence_feats[:sentence_length]
score += torch.nn.functional.cross_entropy(sentence_feats, sentence_tags)
score /= len(features)
loss = score
lengths: List[int] = [len(sentence.tokens) for sentence in batch]
tags = []
all_tags = []
if not self.use_crf:
distribution = F.softmax(features, dim=-1)
(_, indices) = torch.max(features, -1)
sentrange = torch.arange(0, distribution.shape[1]).long().cuda()
if self.predict_posterior:
lengths1 = torch.Tensor(lengths).long()
mask = self.mask
forward_var = self._forward_alg(features, lengths1, distill_mode=True)
backward_var = self._backward_alg(features, lengths1)
forward_backward_score = (forward_var + backward_var) * mask.unsqueeze(-1)
distribution = F.softmax(forward_backward_score, dim=-1)
(_, indices) = torch.max(forward_backward_score, -1)
sentrange = torch.arange(0, distribution.shape[1]).long().cuda()
for (i, vals) in enumerate(zip(features, lengths)):
(feats, length) = vals
if self.use_crf and (not self.predict_posterior):
(confidences, tag_seq, scores) = self._viterbi_decode(feats[:length], all_scores=get_all_tags, current_idx=i)
else:
tag_seq = []
confidences = []
scores = []
tag_seq = indices[i][:length].tolist()
confidences = distribution[i][sentrange, indices[i]][:length].tolist()
scores = distribution[i][:length].tolist()
tags.append([Label(self.tag_dictionary.get_item_for_index(tag), conf) for (conf, tag) in zip(confidences, tag_seq)])
if get_all_tags:
all_tags.append([[Label(self.tag_dictionary.get_item_for_index(score_id), score) for (score_id, score) in enumerate(score_dist)] for score_dist in scores])
(tags, _) = (tags, all_tags)
eval_loss += loss
for (sentence, sent_tags) in zip(batch, tags):
for (token, tag) in zip(sentence.tokens, sent_tags):
token: Token = token
token.add_tag_label('predicted', tag)
eval_line = '{} {} {} {}\n'.format(token.text, token.get_tag(self.tag_type).value, tag.value, tag.score)
lines.append(eval_line)
lines.append('\n')
for sentence in batch:
gold_tags = [(tag.tag, str(tag)) for tag in sentence.get_spans(self.tag_type)]
predicted_tags = [(tag.tag, str(tag)) for tag in sentence.get_spans('predicted')]
for (tag, prediction) in predicted_tags:
if (tag, prediction) in gold_tags:
metric.add_tp(tag)
else:
metric.add_fp(tag)
for (tag, gold) in gold_tags:
if (tag, gold) not in predicted_tags:
metric.add_fn(tag)
else:
metric.add_tn(tag)
store_embeddings(batch, embeddings_storage_mode)
eval_loss /= batch_no
if out_path is not None:
with open(out_path, 'w', encoding='utf-8') as outfile:
outfile.write(''.join(lines))
detailed_result = f'\nMICRO_AVG: acc {metric.micro_avg_accuracy()} - f1-score {metric.micro_avg_f_score()}\nMACRO_AVG: acc {metric.macro_avg_accuracy()} - f1-score {metric.macro_avg_f_score()}'
for class_name in metric.get_classes():
detailed_result += f'\n{class_name:<10} tp: {metric.get_tp(class_name)} - fp: {metric.get_fp(class_name)} - fn: {metric.get_fn(class_name)} - tn: {metric.get_tn(class_name)} - precision: {metric.precision(class_name):.4f} - recall: {metric.recall(class_name):.4f} - accuracy: {metric.accuracy(class_name):.4f} - f1-score: {metric.f_score(class_name):.4f}'
result = Result(main_score=metric.micro_avg_f_score(), log_line=f'{metric.precision()}\t{metric.recall()}\t{metric.micro_avg_f_score()}', log_header='PRECISION\tRECALL\tF1', detailed_results=detailed_result)
return (result, eval_loss)
|
ACE
|
positive
|
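The ACE pair above softmaxes per-language logits and uses them to pool teacher predictions. A small PyTorch sketch of that weighted pooling, with invented shapes:
# Shapes are invented; the real code pools per-sentence teacher features.
import torch
import torch.nn.functional as F

n_teachers, seq_len, n_tags = 3, 7, 5
teacher_preds = torch.rand(n_teachers, seq_len, n_tags)
language_logits = torch.tensor([0.2, 1.5, -0.3])

weights = F.softmax(language_logits, dim=-1)              # sums to 1 over teachers
pooled = (weights[:, None, None] * teacher_preds).sum(dim=0)
print(pooled.shape)  # torch.Size([7, 5])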
def test_nb_args(self):
"""Should have 1 arg."""
(typo, good) = ('1, 2', '1')
<DeepExtract>
function = 'def {0}({1}):\n{2}\n'.format(name, 'a', indent_code(body))
if '{0}' is None:
func_code = function
else:
call = '{0}({1})\n'.format(name, '{0}')
func_code = function + call
</DeepExtract>
<DeepExtract>
indent_body = indent_code(body, tab='\t\t')
method = 'def {0}({1}):\n{2}\n'.format(name, 'self, a', indent_code(body))
class_def = 'class {0}():\n{1}\n'.format(class_name, indent_code(method))
if '{0}' is None:
meth_code = class_def
else:
call = '{0}().{1}({2})\n'.format(class_name, name, '{0}')
meth_code = class_def + call
</DeepExtract>
for code in [func_code, meth_code]:
<DeepExtract>
(bad_code, good_code) = [code.format(arg) for arg in args]
</DeepExtract>
<DeepExtract>
sugg = sorted(listify(sugg, [], str))
(error_type, error_msg) = NBARGERROR
details = 'Running following code :\n---\n{0}\n---'.format(bad_code)
if PythonEnvRange(version_range, interpreters).contains_current_env():
exc = get_exception(bad_code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(bad_code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, sugg, details)
</DeepExtract>
<DeepExtract>
details = 'Running following code :\n---\n{0}\n---'.format(good_code)
if PythonEnvRange(version_range, interpreters).contains_current_env():
exc = get_exception(good_code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
</DeepExtract>
|
def test_nb_args(self):
"""Should have 1 arg."""
(typo, good) = ('1, 2', '1')
function = 'def {0}({1}):\n{2}\n'.format(name, 'a', indent_code(body))
if '{0}' is None:
func_code = function
else:
call = '{0}({1})\n'.format(name, '{0}')
func_code = function + call
indent_body = indent_code(body, tab='\t\t')
method = 'def {0}({1}):\n{2}\n'.format(name, 'self, a', indent_code(body))
class_def = 'class {0}():\n{1}\n'.format(class_name, indent_code(method))
if '{0}' is None:
meth_code = class_def
else:
call = '{0}().{1}({2})\n'.format(class_name, name, '{0}')
meth_code = class_def + call
for code in [func_code, meth_code]:
(bad_code, good_code) = [code.format(arg) for arg in args]
sugg = sorted(listify(sugg, [], str))
(error_type, error_msg) = NBARGERROR
details = 'Running following code :\n---\n{0}\n---'.format(bad_code)
if PythonEnvRange(version_range, interpreters).contains_current_env():
exc = get_exception(bad_code)
self.assertFalse(exc is None, 'No exc thrown.' + details)
(type_caught, value, traceback) = exc
suggestions = sorted(get_suggestions_for_exception(value, traceback))
self.log_exception(bad_code, exc, suggestions)
self.assertTrue(isinstance(value, type_caught))
self.assertTrue(issubclass(type_caught, error_type), '{0} ({1}) not a subclass of {2}'.format(type_caught, value, error_type) + details)
msg = next((a for a in value.args if isinstance(a, str)), '')
if error_msg:
self.assertRegexp(msg, error_msg, details)
self.assertEqual(suggestions, sugg, details)
details = 'Running following code :\n---\n{0}\n---'.format(good_code)
if PythonEnvRange(version_range, interpreters).contains_current_env():
exc = get_exception(good_code)
self.assertTrue(exc is None, 'Exc thrown : ' + str(exc) + details)
|
DidYouMean-Python
|
positive
|
def firstMissingPositive(self, nums: List[int]) -> int:
<DeepExtract>
j = 0
newlist = []
for i in range(len(nums)):
if nums[i] > 0:
newlist.append(nums[i])
nums = newlist
</DeepExtract>
if len(A) == 0:
return 1
if len(A) == 1:
return 1 if A[0] >= 2 else 2
else:
n = len(A)
for i in range(n):
item = abs(A[i])
if item <= n:
A[item - 1] = -1 * abs(A[item - 1])
res = len(A) + 1
for i in range(n):
if A[i] > 0:
return i + 1
return res
|
def firstMissingPositive(self, nums: List[int]) -> int:
j = 0
newlist = []
for i in range(len(nums)):
if nums[i] > 0:
newlist.append(nums[i])
nums = newlist
if len(A) == 0:
return 1
if len(A) == 1:
return 1 if A[0] >= 2 else 2
else:
n = len(A)
for i in range(n):
item = abs(A[i])
if item <= n:
A[item - 1] = -1 * abs(A[item - 1])
res = len(A) + 1
for i in range(n):
if A[i] > 0:
return i + 1
return res
|
Competitive_Programming
|
positive
|
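The Competitive_Programming pair above marks presence by negating the value at index value-1. A standalone sketch of the same idea with a few quick checks (not the dataset's exact method):
from typing import List

def first_missing_positive(nums: List[int]) -> int:
    nums = [x for x in nums if x > 0]        # keep positives only
    n = len(nums)
    for i in range(n):
        v = abs(nums[i])
        if v <= n:
            nums[v - 1] = -abs(nums[v - 1])  # mark "v is present"
    for i in range(n):
        if nums[i] > 0:                      # i + 1 was never marked
            return i + 1
    return n + 1

assert first_missing_positive([3, 4, -1, 1]) == 2
assert first_missing_positive([1, 2, 0]) == 3
assert first_missing_positive([]) == 1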
def test_whenForwardStepGPU_thenStepIsOk(self):
output_size = 9
<DeepExtract>
self.decoder = Decoder(self.input_size_dim, self.hidden_size, self.num_layers, output_size, attention_mechanism=False)
self.decoder.to(self.a_torch_device)
self.decoder_input_setUp(self.a_torch_device)
</DeepExtract>
(predictions, hidden, att_weights) = self.decoder.forward(self.decoder_input, self.decoder_hidden_tensor, self.decoder_output, self.a_lengths_list)
<DeepExtract>
self.assertEqual(self.a_batch_size, predictions.shape[0])
self.assertEqual(output_size, predictions.shape[1])
</DeepExtract>
<DeepExtract>
for actual_prediction in hidden:
self.assertEqual(self.num_layers, actual_prediction.shape[0])
self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
self.assertEqual(self.hidden_size, actual_prediction.shape[2])
</DeepExtract>
self.assertIsNone(att_weights)
|
def test_whenForwardStepGPU_thenStepIsOk(self):
output_size = 9
self.decoder = Decoder(self.input_size_dim, self.hidden_size, self.num_layers, output_size, attention_mechanism=False)
self.decoder.to(self.a_torch_device)
self.decoder_input_setUp(self.a_torch_device)
(predictions, hidden, att_weights) = self.decoder.forward(self.decoder_input, self.decoder_hidden_tensor, self.decoder_output, self.a_lengths_list)
self.assertEqual(self.a_batch_size, predictions.shape[0])
self.assertEqual(output_size, predictions.shape[1])
for actual_prediction in hidden:
self.assertEqual(self.num_layers, actual_prediction.shape[0])
self.assertEqual(self.a_batch_size, actual_prediction.shape[1])
self.assertEqual(self.hidden_size, actual_prediction.shape[2])
self.assertIsNone(att_weights)
|
deepparse
|
positive
|
def create_random_sentencepair(target_seq_length, rng, np_rng):
"""
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
"""
is_random_next = None
curr_strs = []
curr_str_types = []
curr_len = 0
while curr_len < 1:
curr_len = 0
doc_a = None
while doc_a is None:
if self.weighted:
<DeepExtract>
if self.weighting is not None:
idx = np_rng.randint(self.total_len)
doc_a_idx = bisect_right(self.weighting, idx)
else:
doc_a_idx = np_rng.randint(self.ds_len)
</DeepExtract>
else:
doc_a_idx = rng.randint(0, self.ds_len - 1)
<DeepExtract>
lines = self.get_doc(doc_a_idx).split('\n')
if self.presplit_sentences:
doc_a = [line for line in lines if line]
rtn = []
for line in lines:
if line != '':
rtn.extend(tokenize.sent_tokenize(line))
doc_a = rtn
</DeepExtract>
if not doc_a:
doc_a = None
random_start_a = rng.randint(0, len(doc_a) - 1)
while random_start_a < len(doc_a):
sentence = doc_a[random_start_a]
<DeepExtract>
tokens = self.tokenizer.EncodeAsIds(sentence).tokenization
str_type = 'str' + str(0)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
(sentence, sentence_types) = (tokens, token_types)
</DeepExtract>
curr_strs.append(sentence)
curr_str_types.append(sentence_types)
curr_len += len(sentence)
if random_start_a == len(doc_a) - 1 or curr_len >= target_seq_length:
break
random_start_a = random_start_a + 1
if curr_strs:
num_a = 1
if len(curr_strs) >= 2:
num_a = rng.randint(0, len(curr_strs))
tokens_a = []
token_types_a = []
for j in range(num_a):
tokens_a.extend(curr_strs[j])
token_types_a.extend(curr_str_types[j])
tokens_b = []
token_types_b = []
is_random_next = False
if len(curr_strs) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
b_len = 0
while b_len < 1:
doc_b = None
while doc_b is None:
doc_b_idx = rng.randint(0, self.ds_len - 2)
doc_b_idx += int(doc_b_idx >= doc_a_idx)
<DeepExtract>
lines = self.get_doc(doc_b_idx).split('\n')
if self.presplit_sentences:
doc_b = [line for line in lines if line]
rtn = []
for line in lines:
if line != '':
rtn.extend(tokenize.sent_tokenize(line))
doc_b = rtn
</DeepExtract>
if not doc_b:
doc_b = None
random_start_b = rng.randint(0, len(doc_b) - 1)
while random_start_b < len(doc_b):
sentence_b = doc_b[random_start_b]
<DeepExtract>
tokens = self.tokenizer.EncodeAsIds(sentence_b).tokenization
str_type = 'str' + str(1)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
(new_b_tokens, new_b_types) = (tokens, token_types)
</DeepExtract>
b_len += len(new_b_tokens)
tokens_b.extend(new_b_tokens)
token_types_b.extend(new_b_types)
if len(tokens_b) >= target_b_length:
break
random_start_b = random_start_b + 1
else:
is_random_next = False
for j in range(num_a, len(curr_strs)):
tokens_b.extend(curr_strs[j])
token_types_b.extend(curr_str_types[j])
return ((tokens_a, token_types_a), (tokens_b, token_types_b), is_random_next)
|
def create_random_sentencepair(target_seq_length, rng, np_rng):
"""
fetches a random sentencepair corresponding to rng state similar to
https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L248-L294
"""
is_random_next = None
curr_strs = []
curr_str_types = []
curr_len = 0
while curr_len < 1:
curr_len = 0
doc_a = None
while doc_a is None:
if self.weighted:
if self.weighting is not None:
idx = np_rng.randint(self.total_len)
doc_a_idx = bisect_right(self.weighting, idx)
else:
doc_a_idx = np_rng.randint(self.ds_len)
else:
doc_a_idx = rng.randint(0, self.ds_len - 1)
lines = self.get_doc(doc_a_idx).split('\n')
if self.presplit_sentences:
doc_a = [line for line in lines if line]
rtn = []
for line in lines:
if line != '':
rtn.extend(tokenize.sent_tokenize(line))
doc_a = rtn
if not doc_a:
doc_a = None
random_start_a = rng.randint(0, len(doc_a) - 1)
while random_start_a < len(doc_a):
sentence = doc_a[random_start_a]
tokens = self.tokenizer.EncodeAsIds(sentence).tokenization
str_type = 'str' + str(0)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
(sentence, sentence_types) = (tokens, token_types)
curr_strs.append(sentence)
curr_str_types.append(sentence_types)
curr_len += len(sentence)
if random_start_a == len(doc_a) - 1 or curr_len >= target_seq_length:
break
random_start_a = random_start_a + 1
if curr_strs:
num_a = 1
if len(curr_strs) >= 2:
num_a = rng.randint(0, len(curr_strs))
tokens_a = []
token_types_a = []
for j in range(num_a):
tokens_a.extend(curr_strs[j])
token_types_a.extend(curr_str_types[j])
tokens_b = []
token_types_b = []
is_random_next = False
if len(curr_strs) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
b_len = 0
while b_len < 1:
doc_b = None
while doc_b is None:
doc_b_idx = rng.randint(0, self.ds_len - 2)
doc_b_idx += int(doc_b_idx >= doc_a_idx)
lines = self.get_doc(doc_b_idx).split('\n')
if self.presplit_sentences:
doc_b = [line for line in lines if line]
rtn = []
for line in lines:
if line != '':
rtn.extend(tokenize.sent_tokenize(line))
doc_b = rtn
if not doc_b:
doc_b = None
random_start_b = rng.randint(0, len(doc_b) - 1)
while random_start_b < len(doc_b):
sentence_b = doc_b[random_start_b]
tokens = self.tokenizer.EncodeAsIds(sentence_b).tokenization
str_type = 'str' + str(1)
token_types = [self.tokenizer.get_type(str_type).Id] * len(tokens)
(new_b_tokens, new_b_types) = (tokens, token_types)
b_len += len(new_b_tokens)
tokens_b.extend(new_b_tokens)
token_types_b.extend(new_b_types)
if len(tokens_b) >= target_b_length:
break
random_start_b = random_start_b + 1
else:
is_random_next = False
for j in range(num_a, len(curr_strs)):
tokens_b.extend(curr_strs[j])
token_types_b.extend(curr_str_types[j])
return ((tokens_a, token_types_a), (tokens_b, token_types_b), is_random_next)
|
CPM-1-Generate
|
positive
|
def run(self):
while not self.stopped():
try:
batch_idx = self.stream_batcher.work.get(block=False, timeout=1.0)
except:
continue
if self.randomize:
n = 0
while n - self.stream_batcher.batch_size + 1 <= 0:
shard_idx = self.rdm.choice(len(list(self.shard2batchidx.keys())), 1, p=self.shard_fractions)[0]
current_paths = self.paths[shard_idx]
<DeepExtract>
if isinstance(current_paths[0], list):
for paths in current_paths:
shuffle_idx = None
for path in paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx == None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
shuffle_idx = None
else:
shuffle_idx = None
for path in current_paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx is None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
</DeepExtract>
n = self.current_data[current_paths[0]].shape[0]
start = self.rdm.randint(0, n - self.stream_batcher.batch_size + 1)
end = start + self.stream_batcher.batch_size
<DeepExtract>
batch_parts = []
if isinstance(current_paths[0], list):
start = start[0]
end = end[1]
for i in range(len(current_paths[0])):
x1 = self.current_data[current_paths[0][i]][start:]
x2 = self.current_data[current_paths[1][i]][:end]
if len(x1.shape) == 1:
x = np.hstack([x1, x2])
else:
x = np.vstack([x1, x2])
batch_parts.append(x)
else:
for path in current_paths:
batch_parts.append(self.current_data[path][start:end])
batch_parts = batch_parts
</DeepExtract>
else:
if batch_idx not in self.batchidx2paths:
log.error('{0}, {1}', batch_idx, list(self.batchidx2paths.keys()))
current_paths = self.batchidx2paths[batch_idx]
(start, end) = self.batchidx2start_end[batch_idx]
<DeepExtract>
if isinstance(current_paths[0], list):
for paths in current_paths:
shuffle_idx = None
for path in paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx == None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
shuffle_idx = None
else:
shuffle_idx = None
for path in current_paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx is None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
</DeepExtract>
<DeepExtract>
batch_parts = []
if isinstance(current_paths[0], list):
start = start[0]
end = end[1]
for i in range(len(current_paths[0])):
x1 = self.current_data[current_paths[0][i]][start:]
x2 = self.current_data[current_paths[1][i]][:end]
if len(x1.shape) == 1:
x = np.hstack([x1, x2])
else:
x = np.vstack([x1, x2])
batch_parts.append(x)
else:
for path in current_paths:
batch_parts.append(self.current_data[path][start:end])
batch_parts = batch_parts
</DeepExtract>
<DeepExtract>
for (i, obs) in enumerate(self.stream_batcher.at_batch_prepared_observers):
self.t.tick(str(i))
batch_parts = obs.at_batch_prepared(batch_parts)
self.t.tick(str(i))
batch_parts = batch_parts
</DeepExtract>
self.stream_batcher.prepared_batches[batch_idx] = batch_parts
try:
self.stream_batcher.prepared_batchidx.put(batch_idx, block=False, timeout=1.0)
except:
continue
<DeepExtract>
total_bytes = 0
for (path, shard) in self.current_data.items():
total_bytes += shard.nbytes
GB_usage = total_bytes / 1024.0 ** 3.0
</DeepExtract>
if GB_usage > self.cache_size_GB:
<DeepExtract>
i = 0
n = len(self.cache_order)
while i < n:
if self.cache_order[i] in current_paths:
i += 1
continue
path = self.cache_order.pop(i)
self.current_data.pop(path, None)
GB_usage = self.determine_cache_size()
n -= 1
if GB_usage < self.cache_size_GB:
break
</DeepExtract>
self.batches_processes += 1
if self.batches_processes % 100 == 0:
if benchmark:
for (i, obs) in enumerate(self.stream_batcher.at_batch_prepared_observers):
t = self.t.tock(str(i))
|
def run(self):
while not self.stopped():
try:
batch_idx = self.stream_batcher.work.get(block=False, timeout=1.0)
except:
continue
if self.randomize:
n = 0
while n - self.stream_batcher.batch_size + 1 <= 0:
shard_idx = self.rdm.choice(len(list(self.shard2batchidx.keys())), 1, p=self.shard_fractions)[0]
current_paths = self.paths[shard_idx]
if isinstance(current_paths[0], list):
for paths in current_paths:
shuffle_idx = None
for path in paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx == None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
shuffle_idx = None
else:
shuffle_idx = None
for path in current_paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx is None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
n = self.current_data[current_paths[0]].shape[0]
start = self.rdm.randint(0, n - self.stream_batcher.batch_size + 1)
end = start + self.stream_batcher.batch_size
batch_parts = []
if isinstance(current_paths[0], list):
start = start[0]
end = end[1]
for i in range(len(current_paths[0])):
x1 = self.current_data[current_paths[0][i]][start:]
x2 = self.current_data[current_paths[1][i]][:end]
if len(x1.shape) == 1:
x = np.hstack([x1, x2])
else:
x = np.vstack([x1, x2])
batch_parts.append(x)
else:
for path in current_paths:
batch_parts.append(self.current_data[path][start:end])
batch_parts = batch_parts
else:
if batch_idx not in self.batchidx2paths:
log.error('{0}, {1}', batch_idx, list(self.batchidx2paths.keys()))
current_paths = self.batchidx2paths[batch_idx]
(start, end) = self.batchidx2start_end[batch_idx]
if isinstance(current_paths[0], list):
for paths in current_paths:
shuffle_idx = None
for path in paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx == None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
shuffle_idx = None
else:
shuffle_idx = None
for path in current_paths:
if path not in self.current_data:
ordered_data = load_data(path)
self.cache_order.append(path)
if shuffle_idx is None and self.randomize:
shuffle_idx = np.arange(ordered_data.shape[0])
self.rdm.shuffle(shuffle_idx)
if self.randomize:
data = np.copy(ordered_data[shuffle_idx])
del ordered_data
order_data = None
self.current_data[path] = data
else:
self.current_data[path] = ordered_data
batch_parts = []
if isinstance(current_paths[0], list):
start = start[0]
end = end[1]
for i in range(len(current_paths[0])):
x1 = self.current_data[current_paths[0][i]][start:]
x2 = self.current_data[current_paths[1][i]][:end]
if len(x1.shape) == 1:
x = np.hstack([x1, x2])
else:
x = np.vstack([x1, x2])
batch_parts.append(x)
else:
for path in current_paths:
batch_parts.append(self.current_data[path][start:end])
batch_parts = batch_parts
for (i, obs) in enumerate(self.stream_batcher.at_batch_prepared_observers):
self.t.tick(str(i))
batch_parts = obs.at_batch_prepared(batch_parts)
self.t.tick(str(i))
batch_parts = batch_parts
self.stream_batcher.prepared_batches[batch_idx] = batch_parts
try:
self.stream_batcher.prepared_batchidx.put(batch_idx, block=False, timeout=1.0)
except:
continue
total_bytes = 0
for (path, shard) in self.current_data.items():
total_bytes += shard.nbytes
GB_usage = total_bytes / 1024.0 ** 3.0
if GB_usage > self.cache_size_GB:
i = 0
n = len(self.cache_order)
while i < n:
if self.cache_order[i] in current_paths:
i += 1
continue
path = self.cache_order.pop(i)
self.current_data.pop(path, None)
GB_usage = self.determine_cache_size()
n -= 1
if GB_usage < self.cache_size_GB:
break
self.batches_processes += 1
if self.batches_processes % 100 == 0:
if benchmark:
for (i, obs) in enumerate(self.stream_batcher.at_batch_prepared_observers):
t = self.t.tock(str(i))
|
CPL
|
positive
|
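The CPL pair above evicts cached shards in insertion order, skipping paths still in use, until memory drops under a budget. A tiny standalone sketch of that policy with invented sizes:
import numpy as np

def evict(cache, cache_order, in_use, budget_bytes):
    # Drop the oldest cached arrays first, but never ones currently in use.
    i = 0
    while i < len(cache_order) and sum(a.nbytes for a in cache.values()) > budget_bytes:
        path = cache_order[i]
        if path in in_use:
            i += 1
            continue
        cache_order.pop(i)
        cache.pop(path, None)

cache = {"a": np.zeros(1000), "b": np.zeros(1000), "c": np.zeros(1000)}
order = ["a", "b", "c"]
evict(cache, order, in_use={"b"}, budget_bytes=20_000)
print(sorted(cache))  # ['b', 'c'] -- the oldest unused shard 'a' was dropped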
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
<DeepExtract>
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
</DeepExtract>
return
user = users.get_current_user()
if not user:
request_handler.redirect(users.create_login_url(request_handler.request.uri))
return
<DeepExtract>
if self.flow is None:
redirect_uri = request_handler.request.relative_url(self._callback_path)
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret, self._scope, redirect_uri=redirect_uri, user_agent=self._user_agent, auth_uri=self._auth_uri, token_uri=self._token_uri, revoke_uri=self._revoke_uri, **self._kwargs)
</DeepExtract>
<DeepExtract>
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(), action_id=str(uri))
self.flow.params['state'] = uri + ':' + token
</DeepExtract>
self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
resp = method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
finally:
self.credentials = None
return resp
|
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
return
user = users.get_current_user()
if not user:
request_handler.redirect(users.create_login_url(request_handler.request.uri))
return
if self.flow is None:
redirect_uri = request_handler.request.relative_url(self._callback_path)
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret, self._scope, redirect_uri=redirect_uri, user_agent=self._user_agent, auth_uri=self._auth_uri, token_uri=self._token_uri, revoke_uri=self._revoke_uri, **self._kwargs)
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(), action_id=str(uri))
self.flow.params['state'] = uri + ':' + token
self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
resp = method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
finally:
self.credentials = None
return resp
|
CUPS-Cloud-Print
|
positive
|
def find_resources(files, tagname, attrname, filter=None):
"""
Search all files and return a list of local URIs from attrname attribute
values in tagname tags.
Handles HTML open and XHTML closed tags.
Non-local URIs are skipped.
files can be a file name or a list of file names.
The filter function takes a dictionary of tag attributes and returns True
if the URI is to be included.
"""
class FindResources(HTMLParser):
def handle_startendtag(self, tag, attrs):
<DeepExtract>
attrs = dict(attrs)
if tag == tagname and (filter is None or filter(attrs)):
uri = urlparse(attrs[attrname])
if uri[0] in ('', 'file') and (not uri[1]) and uri[2]:
result.append(uri[2])
</DeepExtract>
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == tagname and (filter is None or filter(attrs)):
uri = urlparse(attrs[attrname])
if uri[0] in ('', 'file') and (not uri[1]) and uri[2]:
result.append(uri[2])
if isinstance(files, str):
files = [files]
result = []
for filename in files:
<DeepExtract>
if OPTIONS.verbose or OPTIONS.dry_run:
infomsg('finding resources in: %s' % filename)
</DeepExtract>
if OPTIONS.dry_run:
continue
parser = FindResources()
with open(filename, 'rb') as open_file:
contents = open_file.read()
contents = contents.decode(get_encoding(contents))
parser.feed(contents)
parser.close()
result = list(set(result))
result.sort()
return result
|
def find_resources(files, tagname, attrname, filter=None):
"""
Search all files and return a list of local URIs from attrname attribute
values in tagname tags.
Handles HTML open and XHTML closed tags.
Non-local URIs are skipped.
files can be a file name or a list of file names.
The filter function takes a dictionary of tag attributes and returns True
if the URI is to be included.
"""
class FindResources(HTMLParser):
def handle_startendtag(self, tag, attrs):
attrs = dict(attrs)
if tag == tagname and (filter is None or filter(attrs)):
uri = urlparse(attrs[attrname])
if uri[0] in ('', 'file') and (not uri[1]) and uri[2]:
result.append(uri[2])
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == tagname and (filter is None or filter(attrs)):
uri = urlparse(attrs[attrname])
if uri[0] in ('', 'file') and (not uri[1]) and uri[2]:
result.append(uri[2])
if isinstance(files, str):
files = [files]
result = []
for filename in files:
if OPTIONS.verbose or OPTIONS.dry_run:
infomsg('finding resources in: %s' % filename)
if OPTIONS.dry_run:
continue
parser = FindResources()
with open(filename, 'rb') as open_file:
contents = open_file.read()
contents = contents.decode(get_encoding(contents))
parser.feed(contents)
parser.close()
result = list(set(result))
result.sort()
return result
|
asciidoc-py
|
positive
|
def delete(self):
"""Delete credential.
Frees any resources associated with storing the credential.
The Storage lock must *not* be held when this is called.
Returns:
None
"""
<DeepExtract>
pass
</DeepExtract>
try:
return self.locked_delete()
finally:
<DeepExtract>
pass
</DeepExtract>
|
def delete(self):
"""Delete credential.
Frees any resources associated with storing the credential.
The Storage lock must *not* be held when this is called.
Returns:
None
"""
pass
try:
return self.locked_delete()
finally:
pass
|
CalendarHangout
|
positive
|
@pyqtSlot()
def run(self):
"""
Run the snooper thread.
"""
try:
self.signals.started.emit()
<DeepExtract>
self.validate()
self.signals.activity.emit('Starting the volatility snooper...')
volatility_dict = {}
for (index, ticker) in enumerate(self.tickers):
if not self.running:
break
self.signals.activity.emit(f'Gathering volatility for {ticker}...')
self.signals.progress.emit(int(index / len(self.tickers) * 100))
data = self.binance_client.get_historical_klines(ticker, self.short_interval, self.get_starting_timestamp())
data_length = len(data)
multiplier = 2
impossible = False
while len(data) < self.periods + 1:
starting_timestamp = self.get_starting_timestamp(multiplier=multiplier)
data = self.binance_client.get_historical_klines(ticker, self.short_interval, starting_timestamp)
multiplier += 1
if len(data) == data_length:
impossible = True
break
data_length = len(data)
if impossible:
volatility_dict[ticker] = 'Not enough data. Maybe the ticker is too new.'
else:
data = [get_normalized_data(d) for d in data]
volatility_dict[ticker] = self.volatility_func(periods=self.periods, data=data)
volatility_dict = volatility_dict
</DeepExtract>
self.signals.finished.emit(volatility_dict)
except Exception as e:
algobot.MAIN_LOGGER.exception(repr(e))
self.signals.error.emit(str(e))
finally:
self.running = False
self.signals.restore.emit()
|
@pyqtSlot()
def run(self):
"""
Run the snooper thread.
"""
try:
self.signals.started.emit()
self.validate()
self.signals.activity.emit('Starting the volatility snooper...')
volatility_dict = {}
for (index, ticker) in enumerate(self.tickers):
if not self.running:
break
self.signals.activity.emit(f'Gathering volatility for {ticker}...')
self.signals.progress.emit(int(index / len(self.tickers) * 100))
data = self.binance_client.get_historical_klines(ticker, self.short_interval, self.get_starting_timestamp())
data_length = len(data)
multiplier = 2
impossible = False
while len(data) < self.periods + 1:
starting_timestamp = self.get_starting_timestamp(multiplier=multiplier)
data = self.binance_client.get_historical_klines(ticker, self.short_interval, starting_timestamp)
multiplier += 1
if len(data) == data_length:
impossible = True
break
data_length = len(data)
if impossible:
volatility_dict[ticker] = 'Not enough data. Maybe the ticker is too new.'
else:
data = [get_normalized_data(d) for d in data]
volatility_dict[ticker] = self.volatility_func(periods=self.periods, data=data)
volatility_dict = volatility_dict
self.signals.finished.emit(volatility_dict)
except Exception as e:
algobot.MAIN_LOGGER.exception(repr(e))
self.signals.error.emit(str(e))
finally:
self.running = False
self.signals.restore.emit()
|
algobot
|
positive
|
def test_can_use_at_name(self):
<DeepExtract>
self.assertEqual(self.get_twitter_cleaned_identifier('@callistoorg'), 'twitter:callistoorg')
</DeepExtract>
<DeepExtract>
form = MatchingRequiredForm({'twitter_identifier': '@callistoorgtoolong'}, view=self.mock_view)
self.assertFalse(form.is_valid())
</DeepExtract>
|
def test_can_use_at_name(self):
self.assertEqual(self.get_twitter_cleaned_identifier('@callistoorg'), 'twitter:callistoorg')
form = MatchingRequiredForm({'twitter_identifier': '@callistoorgtoolong'}, view=self.mock_view)
self.assertFalse(form.is_valid())
|
callisto-core
|
positive
|
def test_should_return_the_suggestion_from_agent_ignoring_confidence_if_is_name_agent_command(mocker):
<DeepExtract>
agent = Mock(spec=Agent)
agent.agent_name = 'demo_agent'
mock_agent = agent
</DeepExtract>
mocker.patch.object(AgentDatasource, 'get_instances', return_value=[mock_agent], autospec=True)
action_to_execute = Action(suggested_command='command', confidence=0.0)
mock_agent.execute.return_value = action_to_execute
mocker.patch.object(ConfigStorage, 'read_config', return_value=PluginConfig(selected=['demo_agent'], default_orchestrator='max_orchestrator'), autospec=True)
message_handler = MessageHandler(ServerStatusDatasource(), AgentDatasource())
action = message_handler.process_message(COMMAND_NAME_AGENT_STATE)
assert action.suggested_command == action_to_execute.suggested_command
assert action.origin_command == command_state().command
assert not action.execute
assert not action.description
|
def test_should_return_the_suggestion_from_agent_ignoring_confidence_if_is_name_agent_command(mocker):
agent = Mock(spec=Agent)
agent.agent_name = 'demo_agent'
mock_agent = agent
mocker.patch.object(AgentDatasource, 'get_instances', return_value=[mock_agent], autospec=True)
action_to_execute = Action(suggested_command='command', confidence=0.0)
mock_agent.execute.return_value = action_to_execute
mocker.patch.object(ConfigStorage, 'read_config', return_value=PluginConfig(selected=['demo_agent'], default_orchestrator='max_orchestrator'), autospec=True)
message_handler = MessageHandler(ServerStatusDatasource(), AgentDatasource())
action = message_handler.process_message(COMMAND_NAME_AGENT_STATE)
assert action.suggested_command == action_to_execute.suggested_command
assert action.origin_command == command_state().command
assert not action.execute
assert not action.description
|
clai
|
positive
|
def __setitem__(self, key, values):
<DeepExtract>
key = normalize_index(key, self.obj.shape)
l = []
for (n, i) in enumerate(key):
if type(i) is slice:
start = i.start if i.start > 0 else None
stop = i.stop if i.stop > -1 else None
stop = None if stop == self.obj.shape[n] else stop
step = None if start is None and stop is None else i.step
l.append(slice(start, stop, step))
else:
l.append(i)
key = tuple(l)
key = key
</DeepExtract>
if self.obj.array_backend == 'sparse':
<DeepExtract>
check = (self.obj.values.coords[0] == key[0]) * (self.obj.values.coords[1] == key[1]) * (self.obj.values.coords[2] == key[2]) * (self.obj.values.coords[3] == key[3])
if check.max():
data_index = np.where(check == True)[0][0]
self.obj.values.data[data_index] = values
else:
self.obj.values.coords = np.concatenate((self.obj.values.coords, np.array(key)[:, None]), 1)
self.obj.values.data = np.concatenate((self.obj.values.data, np.array([values])), 0)
self.obj.values = self.obj.get_array_module()(self.obj.values.coords, self.obj.values.data, prune=True, has_duplicates=False, shape=self.obj.shape, fill_value=self.obj.values.fill_value)
</DeepExtract>
else:
super().__setitem__(key, values)
|
def __setitem__(self, key, values):
key = normalize_index(key, self.obj.shape)
l = []
for (n, i) in enumerate(key):
if type(i) is slice:
start = i.start if i.start > 0 else None
stop = i.stop if i.stop > -1 else None
stop = None if stop == self.obj.shape[n] else stop
step = None if start is None and stop is None else i.step
l.append(slice(start, stop, step))
else:
l.append(i)
key = tuple(l)
key = key
if self.obj.array_backend == 'sparse':
check = (self.obj.values.coords[0] == key[0]) * (self.obj.values.coords[1] == key[1]) * (self.obj.values.coords[2] == key[2]) * (self.obj.values.coords[3] == key[3])
if check.max():
data_index = np.where(check == True)[0][0]
self.obj.values.data[data_index] = values
else:
self.obj.values.coords = np.concatenate((self.obj.values.coords, np.array(key)[:, None]), 1)
self.obj.values.data = np.concatenate((self.obj.values.data, np.array([values])), 0)
self.obj.values = self.obj.get_array_module()(self.obj.values.coords, self.obj.values.data, prune=True, has_duplicates=False, shape=self.obj.shape, fill_value=self.obj.values.fill_value)
else:
super().__setitem__(key, values)
|
chainladder-python
|
positive
|
def close(self):
if self.mode != 'r':
<DeepExtract>
nbattrs = self.path_join(self.machines_dir, '.nbattrs')
with open(nbattrs, mode='w') as f:
f.write(self._machine_nbattr.render(encoding='utf-8', fragment=True, pretty=True))
</DeepExtract>
super(MtzDistribution, self).close()
|
def close(self):
if self.mode != 'r':
nbattrs = self.path_join(self.machines_dir, '.nbattrs')
with open(nbattrs, mode='w') as f:
f.write(self._machine_nbattr.render(encoding='utf-8', fragment=True, pretty=True))
super(MtzDistribution, self).close()
|
canari3
|
positive
|
def __init__(self, root='data', verbose=True, **kwargs):
super(MSMT17, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'MSMT17_V1/train')
self.test_dir = osp.join(self.dataset_dir, 'MSMT17_V1/test')
self.list_train_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_train.txt')
self.list_val_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_val.txt')
self.list_query_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_query.txt')
self.list_gallery_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_gallery.txt')
<DeepExtract>
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.test_dir):
raise RuntimeError("'{}' is not available".format(self.test_dir))
</DeepExtract>
<DeepExtract>
with open(self.list_train_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.train_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
train = dataset
</DeepExtract>
<DeepExtract>
with open(self.list_query_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.test_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
query = dataset
</DeepExtract>
<DeepExtract>
with open(self.list_gallery_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.test_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
gallery = dataset
</DeepExtract>
if verbose:
print('=> MSMT17 loaded')
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
(self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
(self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
(self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
|
def __init__(self, root='data', verbose=True, **kwargs):
super(MSMT17, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'MSMT17_V1/train')
self.test_dir = osp.join(self.dataset_dir, 'MSMT17_V1/test')
self.list_train_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_train.txt')
self.list_val_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_val.txt')
self.list_query_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_query.txt')
self.list_gallery_path = osp.join(self.dataset_dir, 'MSMT17_V1/list_gallery.txt')
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.test_dir):
raise RuntimeError("'{}' is not available".format(self.test_dir))
with open(self.list_train_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.train_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
train = dataset
with open(self.list_query_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.test_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
query = dataset
with open(self.list_gallery_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for (img_idx, img_info) in enumerate(lines):
(img_path, pid) = img_info.split(' ')
pid = int(pid)
camid = int(img_path.split('_')[2]) - 1
img_path = osp.join(self.test_dir, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_pids = len(pid_container)
for (idx, pid) in enumerate(pid_container):
assert idx == pid, 'See code comment for explanation'
gallery = dataset
if verbose:
print('=> MSMT17 loaded')
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
(self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
(self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
(self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
|
ABD-Net
|
positive
|
def train(gumbel_hard, optimGD, lr, anneal_rate, anneal_interval, num_epochs=50, temp_init=3.0, plot_colour='b-'):
prefix = '{}_{}_{}_{}_{}'.format(gumbel_hard, optimGD, lr, anneal_rate, anneal_interval)
global log_file
log_file = open('./gen_logs/%s_log.txt' % prefix, 'w')
log_file.write('Loading data...\n')
source = '/u/jacobath/cortex-data/basic/mnist_binarized_salakhutdinov.pkl.gz'
<DeepExtract>
print('Reading MNIST, ', 'train')
with gzip.open(source, 'rb') as f:
x = cPickle.load(f)
if 'train' == 'train':
data = np.float32(x[0][0])
elif 'train' == 'valid':
data = np.float32(x[1][0])
elif 'train' == 'test':
data = np.float32(x[2][0])
else:
raise ValueError()
data = np.reshape(data, (-1, 1, 28, 28))
X_train = data
</DeepExtract>
noise_var = T.matrix('noise')
input_var = T.tensor4('inputs')
temperature = T.scalar('Temp')
tau = temp_init
log_file.write('Building model and compiling functions...\n')
<DeepExtract>
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm
from lasagne.nonlinearities import sigmoid
layer = InputLayer(shape=(None, 100), input_var=noise_var)
layer = batch_norm(DenseLayer(layer, 1024))
layer = batch_norm(DenseLayer(layer, 128 * 7 * 7))
layer = ReshapeLayer(layer, ([0], 128, 7, 7))
layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, pad=2))
layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2, nonlinearity=lasagne.nonlinearities.identity)
layer = ReshapeLayer(layer, (-1, 28 * 28))
layer = DenseLayer(layer, 28 * 28 * 2, nonlinearity=None)
layer = GumbelSoftmaxLayer(layer, K=28, hard=gumbel_hard, temperature=temperature)
print('Generator output:', layer.output_shape)
generator = layer
</DeepExtract>
<DeepExtract>
from lasagne.layers import InputLayer, Conv2DLayer, ReshapeLayer, DenseLayer, batch_norm
from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
from lasagne.nonlinearities import LeakyRectify, sigmoid
lrelu = LeakyRectify(0.2)
layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
layer = Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
layer = DenseLayer(layer, 1, nonlinearity=None)
print('Discriminator output:', layer.output_shape)
discriminator = layer
</DeepExtract>
log_file.write('Generator and discimininator built...\n')
log_file.flush()
real_out = lasagne.layers.get_output(discriminator)
fake_out = lasagne.layers.get_output(discriminator, lasagne.layers.get_output(generator))
generator_loss = T.nnet.softplus(-fake_out).mean()
discriminator_loss = T.nnet.softplus(-real_out).mean() + T.nnet.softplus(-fake_out).mean() + fake_out.mean()
generator_params = lasagne.layers.get_all_params(generator, trainable=True)
discriminator_params = lasagne.layers.get_all_params(discriminator, trainable=True)
if optimGD == 'adam':
updates = lasagne.updates.adam(generator_loss, generator_params, learning_rate=lr, beta1=0.5)
updates.update(lasagne.updates.adam(discriminator_loss, discriminator_params, learning_rate=lr, beta1=0.5))
elif optimGD == 'sgd':
updates = lasagne.updates.sgd(generator_loss, generator_params, learning_rate=lr)
updates.update(lasagne.updates.sgd(discriminator_loss, discriminator_params, learning_rate=lr))
else:
updates = lasagne.updates.rmsprop(generator_loss, generator_params, learning_rate=lr)
updates.update(lasagne.updates.rmsprop(discriminator_loss, discriminator_params, learning_rate=lr))
log_file.write('Compiling train function...\n')
log_file.flush()
train_fn = theano.function([noise_var, input_var, temperature], [(real_out > 0.0).mean(), (fake_out < 0.0).mean(), generator_loss, discriminator_loss, temperature], updates=updates, allow_input_downcast=True, on_unused_input='ignore')
log_file.write('Compiling generation function...\n')
log_file.flush()
gen_fn = theano.function([noise_var, temperature], lasagne.layers.get_output(generator, deterministic=True), allow_input_downcast=True, on_unused_input='ignore')
log_file.write('Starting training...\n')
log_file.flush()
counter = 0
gen_losses = []
mean_losses = []
p_fake = []
for epoch in range(num_epochs):
train_err = 0
train_batches = 0
start_time = time.time()
print('Epoch: ', epoch)
for batch in iterate_minibatches(X_train, 128, shuffle=True):
inputs = np.array(batch, dtype=np.float32)
noise = lasagne.utils.floatX(np.random.rand(len(inputs), 100))
train_out = train_fn(noise, inputs, tau)
train_err += np.array(train_out)
gen_losses.append(train_out[2])
p_fake.append(1 - train_out[1])
mean_losses.append(np.mean(gen_losses[:-50]))
train_batches += 1
counter += 1
if counter % anneal_interval == 0:
tau = np.maximum(tau * np.exp(-anneal_rate * counter), 0.5)
log_file.write('Epoch {} of {} took {:.3f}s\n'.format(epoch + 1, num_epochs, time.time() - start_time))
log_file.write(' training loss:\t\t{}\n'.format(train_err / train_batches))
log_file.flush()
if epoch % 1 == 0:
log_file.write('Generating Samples...\n')
samples = gen_fn(lasagne.utils.floatX(np.random.rand(100, 100)), tau)
import matplotlib.pyplot as plt
plt.imsave('./gen_images/' + prefix + '_epoch_{}'.format(epoch) + '.png', samples.reshape(10, 10, 28, 28).transpose(0, 2, 1, 3).reshape(10 * 28, 10 * 28), cmap='gray')
import matplotlib.pyplot as plt
print('Plotting plot...')
if gumbel_hard:
name = 'ST - Gumbel Softmax GAN'
else:
name = 'Gumbel Softmax GAN'
label = '${}$'.format(name)
plt.plot(p_fake, plot_colour, label=label)
|
def train(gumbel_hard, optimGD, lr, anneal_rate, anneal_interval, num_epochs=50, temp_init=3.0, plot_colour='b-'):
prefix = '{}_{}_{}_{}_{}'.format(gumbel_hard, optimGD, lr, anneal_rate, anneal_interval)
global log_file
log_file = open('./gen_logs/%s_log.txt' % prefix, 'w')
log_file.write('Loading data...\n')
source = '/u/jacobath/cortex-data/basic/mnist_binarized_salakhutdinov.pkl.gz'
print('Reading MNIST, ', 'train')
with gzip.open(source, 'rb') as f:
x = cPickle.load(f)
if 'train' == 'train':
data = np.float32(x[0][0])
elif 'train' == 'valid':
data = np.float32(x[1][0])
elif 'train' == 'test':
data = np.float32(x[2][0])
else:
raise ValueError()
data = np.reshape(data, (-1, 1, 28, 28))
X_train = data
noise_var = T.matrix('noise')
input_var = T.tensor4('inputs')
temperature = T.scalar('Temp')
tau = temp_init
log_file.write('Building model and compiling functions...\n')
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer, batch_norm
from lasagne.nonlinearities import sigmoid
layer = InputLayer(shape=(None, 100), input_var=noise_var)
layer = batch_norm(DenseLayer(layer, 1024))
layer = batch_norm(DenseLayer(layer, 128 * 7 * 7))
layer = ReshapeLayer(layer, ([0], 128, 7, 7))
layer = batch_norm(Deconv2DLayer(layer, 64, 5, stride=2, pad=2))
layer = Deconv2DLayer(layer, 1, 5, stride=2, pad=2, nonlinearity=lasagne.nonlinearities.identity)
layer = ReshapeLayer(layer, (-1, 28 * 28))
layer = DenseLayer(layer, 28 * 28 * 2, nonlinearity=None)
layer = GumbelSoftmaxLayer(layer, K=28, hard=gumbel_hard, temperature=temperature)
print('Generator output:', layer.output_shape)
generator = layer
from lasagne.layers import InputLayer, Conv2DLayer, ReshapeLayer, DenseLayer, batch_norm
from lasagne.layers.dnn import Conv2DDNNLayer as Conv2DLayer
from lasagne.nonlinearities import LeakyRectify, sigmoid
lrelu = LeakyRectify(0.2)
layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
layer = Conv2DLayer(layer, 64, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = Conv2DLayer(layer, 128, 5, stride=2, pad=2, nonlinearity=lrelu)
layer = DenseLayer(layer, 1024, nonlinearity=lrelu)
layer = DenseLayer(layer, 1, nonlinearity=None)
print('Discriminator output:', layer.output_shape)
discriminator = layer
log_file.write('Generator and discimininator built...\n')
log_file.flush()
real_out = lasagne.layers.get_output(discriminator)
fake_out = lasagne.layers.get_output(discriminator, lasagne.layers.get_output(generator))
generator_loss = T.nnet.softplus(-fake_out).mean()
discriminator_loss = T.nnet.softplus(-real_out).mean() + T.nnet.softplus(-fake_out).mean() + fake_out.mean()
generator_params = lasagne.layers.get_all_params(generator, trainable=True)
discriminator_params = lasagne.layers.get_all_params(discriminator, trainable=True)
if optimGD == 'adam':
updates = lasagne.updates.adam(generator_loss, generator_params, learning_rate=lr, beta1=0.5)
updates.update(lasagne.updates.adam(discriminator_loss, discriminator_params, learning_rate=lr, beta1=0.5))
elif optimGD == 'sgd':
updates = lasagne.updates.sgd(generator_loss, generator_params, learning_rate=lr)
updates.update(lasagne.updates.sgd(discriminator_loss, discriminator_params, learning_rate=lr))
else:
updates = lasagne.updates.rmsprop(generator_loss, generator_params, learning_rate=lr)
updates.update(lasagne.updates.rmsprop(discriminator_loss, discriminator_params, learning_rate=lr))
log_file.write('Compiling train function...\n')
log_file.flush()
train_fn = theano.function([noise_var, input_var, temperature], [(real_out > 0.0).mean(), (fake_out < 0.0).mean(), generator_loss, discriminator_loss, temperature], updates=updates, allow_input_downcast=True, on_unused_input='ignore')
log_file.write('Compiling generation function...\n')
log_file.flush()
gen_fn = theano.function([noise_var, temperature], lasagne.layers.get_output(generator, deterministic=True), allow_input_downcast=True, on_unused_input='ignore')
log_file.write('Starting training...\n')
log_file.flush()
counter = 0
gen_losses = []
mean_losses = []
p_fake = []
for epoch in range(num_epochs):
train_err = 0
train_batches = 0
start_time = time.time()
print('Epoch: ', epoch)
for batch in iterate_minibatches(X_train, 128, shuffle=True):
inputs = np.array(batch, dtype=np.float32)
noise = lasagne.utils.floatX(np.random.rand(len(inputs), 100))
train_out = train_fn(noise, inputs, tau)
train_err += np.array(train_out)
gen_losses.append(train_out[2])
p_fake.append(1 - train_out[1])
mean_losses.append(np.mean(gen_losses[:-50]))
train_batches += 1
counter += 1
if counter % anneal_interval == 0:
tau = np.maximum(tau * np.exp(-anneal_rate * counter), 0.5)
log_file.write('Epoch {} of {} took {:.3f}s\n'.format(epoch + 1, num_epochs, time.time() - start_time))
log_file.write(' training loss:\t\t{}\n'.format(train_err / train_batches))
log_file.flush()
if epoch % 1 == 0:
log_file.write('Generating Samples...\n')
samples = gen_fn(lasagne.utils.floatX(np.random.rand(100, 100)), tau)
import matplotlib.pyplot as plt
plt.imsave('./gen_images/' + prefix + '_epoch_{}'.format(epoch) + '.png', samples.reshape(10, 10, 28, 28).transpose(0, 2, 1, 3).reshape(10 * 28, 10 * 28), cmap='gray')
import matplotlib.pyplot as plt
print('Plotting plot...')
if gumbel_hard:
name = 'ST - Gumbel Softmax GAN'
else:
name = 'Gumbel Softmax GAN'
label = '${}$'.format(name)
plt.plot(p_fake, plot_colour, label=label)
|
BGAN
|
positive
|
def _get_extra_value_as_list(key):
<DeepExtract>
v = [extra['value'] for extra in dataset['extras'] if extra['key'] == key]
value = v[0] if v else None
</DeepExtract>
return json.loads(value) if value else []
|
def _get_extra_value_as_list(key):
v = [extra['value'] for extra in dataset['extras'] if extra['key'] == key]
value = v[0] if v else None
return json.loads(value) if value else []
|
ckanext-dcat
|
positive
|
def _putresourcedict(self):
<DeepExtract>
if PY3K and isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', bytes):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]'.decode('latin1')
elif not PY3K and isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', unicode):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]'.encode('latin1')
elif not isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', basestring):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = str('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
if self.state == 2:
self.pages[self.page] += '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' + '\n'
else:
self.buffer += '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' + '\n'
</DeepExtract>
<DeepExtract>
if PY3K and isinstance('/Font <<', bytes):
'/Font <<' = '/Font <<'.decode('latin1')
elif not PY3K and isinstance('/Font <<', unicode):
'/Font <<' = '/Font <<'.encode('latin1')
elif not isinstance('/Font <<', basestring):
'/Font <<' = str('/Font <<')
if self.state == 2:
self.pages[self.page] += '/Font <<' + '\n'
else:
self.buffer += '/Font <<' + '\n'
</DeepExtract>
f = [(x['i'], x['n']) for x in self.fonts.values()]
f.sort()
for (idx, n) in f:
<DeepExtract>
if PY3K and isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', bytes):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = '/F' + str(idx) + ' ' + str(n) + ' 0 R'.decode('latin1')
elif not PY3K and isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', unicode):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = '/F' + str(idx) + ' ' + str(n) + ' 0 R'.encode('latin1')
elif not isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', basestring):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = str('/F' + str(idx) + ' ' + str(n) + ' 0 R')
if self.state == 2:
self.pages[self.page] += '/F' + str(idx) + ' ' + str(n) + ' 0 R' + '\n'
else:
self.buffer += '/F' + str(idx) + ' ' + str(n) + ' 0 R' + '\n'
</DeepExtract>
<DeepExtract>
if PY3K and isinstance('>>', bytes):
'>>' = '>>'.decode('latin1')
elif not PY3K and isinstance('>>', unicode):
'>>' = '>>'.encode('latin1')
elif not isinstance('>>', basestring):
'>>' = str('>>')
if self.state == 2:
self.pages[self.page] += '>>' + '\n'
else:
self.buffer += '>>' + '\n'
</DeepExtract>
<DeepExtract>
if PY3K and isinstance('/XObject <<', bytes):
'/XObject <<' = '/XObject <<'.decode('latin1')
elif not PY3K and isinstance('/XObject <<', unicode):
'/XObject <<' = '/XObject <<'.encode('latin1')
elif not isinstance('/XObject <<', basestring):
'/XObject <<' = str('/XObject <<')
if self.state == 2:
self.pages[self.page] += '/XObject <<' + '\n'
else:
self.buffer += '/XObject <<' + '\n'
</DeepExtract>
<DeepExtract>
i = [(x['i'], x['n']) for x in self.images.values()]
i.sort()
for (idx, n) in i:
self._out('/I' + str(idx) + ' ' + str(n) + ' 0 R')
</DeepExtract>
<DeepExtract>
if PY3K and isinstance('>>', bytes):
'>>' = '>>'.decode('latin1')
elif not PY3K and isinstance('>>', unicode):
'>>' = '>>'.encode('latin1')
elif not isinstance('>>', basestring):
'>>' = str('>>')
if self.state == 2:
self.pages[self.page] += '>>' + '\n'
else:
self.buffer += '>>' + '\n'
</DeepExtract>
|
def _putresourcedict(self):
if PY3K and isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', bytes):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]'.decode('latin1')
elif not PY3K and isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', unicode):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]'.encode('latin1')
elif not isinstance('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]', basestring):
'/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' = str('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
if self.state == 2:
self.pages[self.page] += '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' + '\n'
else:
self.buffer += '/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]' + '\n'
if PY3K and isinstance('/Font <<', bytes):
'/Font <<' = '/Font <<'.decode('latin1')
elif not PY3K and isinstance('/Font <<', unicode):
'/Font <<' = '/Font <<'.encode('latin1')
elif not isinstance('/Font <<', basestring):
'/Font <<' = str('/Font <<')
if self.state == 2:
self.pages[self.page] += '/Font <<' + '\n'
else:
self.buffer += '/Font <<' + '\n'
f = [(x['i'], x['n']) for x in self.fonts.values()]
f.sort()
for (idx, n) in f:
if PY3K and isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', bytes):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = '/F' + str(idx) + ' ' + str(n) + ' 0 R'.decode('latin1')
elif not PY3K and isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', unicode):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = '/F' + str(idx) + ' ' + str(n) + ' 0 R'.encode('latin1')
elif not isinstance('/F' + str(idx) + ' ' + str(n) + ' 0 R', basestring):
'/F' + str(idx) + ' ' + str(n) + ' 0 R' = str('/F' + str(idx) + ' ' + str(n) + ' 0 R')
if self.state == 2:
self.pages[self.page] += '/F' + str(idx) + ' ' + str(n) + ' 0 R' + '\n'
else:
self.buffer += '/F' + str(idx) + ' ' + str(n) + ' 0 R' + '\n'
if PY3K and isinstance('>>', bytes):
'>>' = '>>'.decode('latin1')
elif not PY3K and isinstance('>>', unicode):
'>>' = '>>'.encode('latin1')
elif not isinstance('>>', basestring):
'>>' = str('>>')
if self.state == 2:
self.pages[self.page] += '>>' + '\n'
else:
self.buffer += '>>' + '\n'
if PY3K and isinstance('/XObject <<', bytes):
'/XObject <<' = '/XObject <<'.decode('latin1')
elif not PY3K and isinstance('/XObject <<', unicode):
'/XObject <<' = '/XObject <<'.encode('latin1')
elif not isinstance('/XObject <<', basestring):
'/XObject <<' = str('/XObject <<')
if self.state == 2:
self.pages[self.page] += '/XObject <<' + '\n'
else:
self.buffer += '/XObject <<' + '\n'
i = [(x['i'], x['n']) for x in self.images.values()]
i.sort()
for (idx, n) in i:
self._out('/I' + str(idx) + ' ' + str(n) + ' 0 R')
if PY3K and isinstance('>>', bytes):
'>>' = '>>'.decode('latin1')
elif not PY3K and isinstance('>>', unicode):
'>>' = '>>'.encode('latin1')
elif not isinstance('>>', basestring):
'>>' = str('>>')
if self.state == 2:
self.pages[self.page] += '>>' + '\n'
else:
self.buffer += '>>' + '\n'
|
endesive
|
positive
|
def _hit_to_row(hit: Mapping[str, Any]) -> Mapping[str, Any]:
row: Dict[str, Any] = {}
for k in hit.keys():
if k == '_source':
<DeepExtract>
fields = {}
for field in hit['_source']:
if isinstance(hit['_source'][field], dict):
nested_fields = _resolve_fields(hit['_source'][field])
for (n_field, val) in nested_fields.items():
fields[f'{field}.{n_field}'] = val
else:
fields[field] = hit['_source'][field]
solved_fields = fields
</DeepExtract>
row.update(solved_fields)
elif k.startswith('_'):
row[k] = hit[k]
return row
|
def _hit_to_row(hit: Mapping[str, Any]) -> Mapping[str, Any]:
row: Dict[str, Any] = {}
for k in hit.keys():
if k == '_source':
fields = {}
for field in hit['_source']:
if isinstance(hit['_source'][field], dict):
nested_fields = _resolve_fields(hit['_source'][field])
for (n_field, val) in nested_fields.items():
fields[f'{field}.{n_field}'] = val
else:
fields[field] = hit['_source'][field]
solved_fields = fields
row.update(solved_fields)
elif k.startswith('_'):
row[k] = hit[k]
return row
|
aws-data-wrangler
|
positive
|
def process_bases(self, node, elem, bases, tagname):
for base in bases:
child = ElementTree.Element(tagname)
<DeepExtract>
if base.access == cindex.AccessSpecifier.PROTECTED:
child.set('access', 'protected')
elif base.access == cindex.AccessSpecifier.PRIVATE:
child.set('access', 'private')
elif base.access == cindex.AccessSpecifier.PUBLIC:
child.set('access', 'public')
</DeepExtract>
child.append(self.type_to_xml(base.type, node))
if base.node and (not base.node.comment is None) and base.node.comment.brief:
child.append(self.doc_to_xml(base.node, base.node.comment.brief, 'brief'))
elem.append(child)
|
def process_bases(self, node, elem, bases, tagname):
for base in bases:
child = ElementTree.Element(tagname)
if base.access == cindex.AccessSpecifier.PROTECTED:
child.set('access', 'protected')
elif base.access == cindex.AccessSpecifier.PRIVATE:
child.set('access', 'private')
elif base.access == cindex.AccessSpecifier.PUBLIC:
child.set('access', 'public')
child.append(self.type_to_xml(base.type, node))
if base.node and (not base.node.comment is None) and base.node.comment.brief:
child.append(self.doc_to_xml(base.node, base.node.comment.brief, 'brief'))
elem.append(child)
|
cldoc
|
positive
|
def create_cli_app():
"""
Create a Flask application instance and validate the configuration for the Flask CLI.
:return: a Flask application object
:rtype: flask.Flask
"""
<DeepExtract>
connexion_app = connexion.FlaskApp(__name__, options={'swagger_ui': False})
app = connexion_app.app
if config_obj:
app.config.from_object(config_obj)
else:
load_config(app)
default_handler.setFormatter(logging.Formatter(fmt=app.config['CACHITO_LOG_FORMAT'], datefmt='%Y-%m-%d %H:%M:%S'))
app.logger.setLevel(app.config['CACHITO_LOG_LEVEL'])
for logger_name in app.config['CACHITO_ADDITIONAL_LOGGERS']:
logger = logging.getLogger(logger_name)
logger.setLevel(app.config['CACHITO_LOG_LEVEL'])
logger.addHandler(default_handler)
db.init_app(app)
migrations_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrations')
Migrate(app, db, directory=migrations_dir)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(user_loader)
login_manager.request_loader(load_user_from_request)
app.register_blueprint(docs)
path = Path(__file__).parent.absolute()
connexion_app.add_api(f'{path}/static/api_v1.yaml', strict_validation=True, validator_map={'body': RequestBodyValidator, 'parameter': ParameterValidator})
app.add_url_rule('/healthcheck', view_func=healthcheck)
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
app.register_error_handler(CachitoError, json_error)
app.register_error_handler(ClientError, json_error)
app.register_error_handler(ServerError, json_error)
app.register_error_handler(ValidationError, json_error)
app.register_error_handler(ContentManifestError, json_error)
app.register_error_handler(pydantic.ValidationError, validation_error)
init_metrics(app)
app = app
</DeepExtract>
validate_cachito_config(app.config, cli=True)
return app
|
def create_cli_app():
"""
Create a Flask application instance and validate the configuration for the Flask CLI.
:return: a Flask application object
:rtype: flask.Flask
"""
connexion_app = connexion.FlaskApp(__name__, options={'swagger_ui': False})
app = connexion_app.app
if config_obj:
app.config.from_object(config_obj)
else:
load_config(app)
default_handler.setFormatter(logging.Formatter(fmt=app.config['CACHITO_LOG_FORMAT'], datefmt='%Y-%m-%d %H:%M:%S'))
app.logger.setLevel(app.config['CACHITO_LOG_LEVEL'])
for logger_name in app.config['CACHITO_ADDITIONAL_LOGGERS']:
logger = logging.getLogger(logger_name)
logger.setLevel(app.config['CACHITO_LOG_LEVEL'])
logger.addHandler(default_handler)
db.init_app(app)
migrations_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrations')
Migrate(app, db, directory=migrations_dir)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(user_loader)
login_manager.request_loader(load_user_from_request)
app.register_blueprint(docs)
path = Path(__file__).parent.absolute()
connexion_app.add_api(f'{path}/static/api_v1.yaml', strict_validation=True, validator_map={'body': RequestBodyValidator, 'parameter': ParameterValidator})
app.add_url_rule('/healthcheck', view_func=healthcheck)
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
app.register_error_handler(CachitoError, json_error)
app.register_error_handler(ClientError, json_error)
app.register_error_handler(ServerError, json_error)
app.register_error_handler(ValidationError, json_error)
app.register_error_handler(ContentManifestError, json_error)
app.register_error_handler(pydantic.ValidationError, validation_error)
init_metrics(app)
app = app
validate_cachito_config(app.config, cli=True)
return app
|
cachito
|
positive
|
def find_section_containing(self, addr, skip_pseudo_objects=True):
"""
Find the section object that the address belongs to.
:param int addr: The address to test.
:param bool skip_pseudo_objects: Skip objects that CLE adds during loading.
:return: The section that the address belongs to, or None if the address does not belong to any section, or if
section information is not available.
:rtype: cle.Section
"""
<DeepExtract>
def _check_object_memory(obj_):
if isinstance(obj_.memory, Clemory):
if AT.from_va(addr, obj_).to_rva() in obj_.memory:
self._last_object = obj_
obj = obj_
obj = None
elif type(obj_.memory) is str:
self._last_object = obj_
obj = obj_
else:
raise CLEError('Unsupported memory type %s' % type(obj_.memory))
if self._last_object is not None and self._last_object.min_addr <= addr <= self._last_object.max_addr:
if not False:
obj = self._last_object
if not self._last_object.has_memory:
obj = self._last_object
o = _check_object_memory(self._last_object)
if o:
obj = o
if addr > self.max_addr or addr < self.min_addr:
obj = None
obj = key_bisect_floor_key(self.all_objects, addr, keyfunc=lambda obj: obj.min_addr)
if obj is None:
obj = None
if not obj.min_addr <= addr <= obj.max_addr:
obj = None
if not False:
self._last_object = obj
obj = obj
if not obj.has_memory:
self._last_object = obj
obj = obj
obj = _check_object_memory(obj)
</DeepExtract>
if obj is None:
return None
if skip_pseudo_objects and isinstance(obj, (ExternObject, KernelObject, TLSObject)):
return None
return obj.find_section_containing(addr)
|
def find_section_containing(self, addr, skip_pseudo_objects=True):
"""
Find the section object that the address belongs to.
:param int addr: The address to test.
:param bool skip_pseudo_objects: Skip objects that CLE adds during loading.
:return: The section that the address belongs to, or None if the address does not belong to any section, or if
section information is not available.
:rtype: cle.Section
"""
def _check_object_memory(obj_):
if isinstance(obj_.memory, Clemory):
if AT.from_va(addr, obj_).to_rva() in obj_.memory:
self._last_object = obj_
obj = obj_
obj = None
elif type(obj_.memory) is str:
self._last_object = obj_
obj = obj_
else:
raise CLEError('Unsupported memory type %s' % type(obj_.memory))
if self._last_object is not None and self._last_object.min_addr <= addr <= self._last_object.max_addr:
if not False:
obj = self._last_object
if not self._last_object.has_memory:
obj = self._last_object
o = _check_object_memory(self._last_object)
if o:
obj = o
if addr > self.max_addr or addr < self.min_addr:
obj = None
obj = key_bisect_floor_key(self.all_objects, addr, keyfunc=lambda obj: obj.min_addr)
if obj is None:
obj = None
if not obj.min_addr <= addr <= obj.max_addr:
obj = None
if not False:
self._last_object = obj
obj = obj
if not obj.has_memory:
self._last_object = obj
obj = obj
obj = _check_object_memory(obj)
if obj is None:
return None
if skip_pseudo_objects and isinstance(obj, (ExternObject, KernelObject, TLSObject)):
return None
return obj.find_section_containing(addr)
|
cle
|
positive
|
@lru_cache(maxsize=_LRU_CACHE_SIZE)
def integrals(self, start: XValue[T], stop: XValue[T], step: XValueDiff[T], transform: Callable[[XValueDiff[T]], float]=lambda x: cast(float, x)) -> 'SortedDict[XValue[T], float]':
"""Compute a sequence of integrals of the function
:param start: lower bound of integral sequence
:param stop: upper bound of integral sequence
:param step: width of each "chunk" of the integral sequence
:param transform: function to apply to x-widths before computing the integral
:returns: a SortedDict of the numeric integral values of the function between start and stop;
each integral has a range of size `step`, and the key-value is the left endpoint of the chunk
"""
step = step or stop - start
if len(self.breakpoints) == 0:
step_width = transform(step)
range_width = transform(stop - start)
num_full_chunks = int(range_width // step_width)
sequence = SortedDict([(start + step * i, step_width * self._initial_value) for i in range(num_full_chunks)])
if range_width % step_width != 0:
sequence[start + step * num_full_chunks] = range_width % step_width * self._initial_value
return sequence
curr_xval = start
<DeepExtract>
if len(self.breakpoints) == 0 or start < self.breakpoints.keys()[0]:
curr_value = self._initial_value
else:
lower_index = self.breakpoints.bisect(start) - 1
curr_value = self.breakpoints.values()[lower_index]
</DeepExtract>
<DeepExtract>
try:
(breakpoint, value) = self.breakpoints.peekitem(self.breakpoints.bisect(start))
except IndexError:
self.breakpoints.bisect(start) = None
(breakpoint, value) = (None, self.breakpoints.values()[-1])
(next_index, next_breakpoint, next_value) = (self.breakpoints.bisect(start), breakpoint, value)
</DeepExtract>
sequence = SortedDict()
while curr_xval < stop:
orig_xval = curr_xval
next_xval = min(stop, curr_xval + step)
next_integral: float = 0
while next_breakpoint and next_xval >= next_breakpoint:
assert next_index is not None
next_integral += transform(next_breakpoint - curr_xval) * curr_value
curr_xval = next_breakpoint
curr_value = next_value
<DeepExtract>
try:
(breakpoint, value) = self.breakpoints.peekitem(next_index + 1)
except IndexError:
next_index + 1 = None
(breakpoint, value) = (None, self.breakpoints.values()[-1])
(next_index, next_breakpoint, next_value) = (next_index + 1, breakpoint, value)
</DeepExtract>
next_integral += transform(next_xval - curr_xval) * curr_value
sequence[orig_xval] = next_integral
curr_xval = next_xval
return sequence
|
@lru_cache(maxsize=_LRU_CACHE_SIZE)
def integrals(self, start: XValue[T], stop: XValue[T], step: XValueDiff[T], transform: Callable[[XValueDiff[T]], float]=lambda x: cast(float, x)) -> 'SortedDict[XValue[T], float]':
"""Compute a sequence of integrals of the function
:param start: lower bound of integral sequence
:param stop: upper bound of integral sequence
:param step: width of each "chunk" of the integral sequence
:param transform: function to apply to x-widths before computing the integral
:returns: a SortedDict of the numeric integral values of the function between start and stop;
each integral has a range of size `step`, and the key-value is the left endpoint of the chunk
"""
step = step or stop - start
if len(self.breakpoints) == 0:
step_width = transform(step)
range_width = transform(stop - start)
num_full_chunks = int(range_width // step_width)
sequence = SortedDict([(start + step * i, step_width * self._initial_value) for i in range(num_full_chunks)])
if range_width % step_width != 0:
sequence[start + step * num_full_chunks] = range_width % step_width * self._initial_value
return sequence
curr_xval = start
if len(self.breakpoints) == 0 or start < self.breakpoints.keys()[0]:
curr_value = self._initial_value
else:
lower_index = self.breakpoints.bisect(start) - 1
curr_value = self.breakpoints.values()[lower_index]
try:
(breakpoint, value) = self.breakpoints.peekitem(self.breakpoints.bisect(start))
except IndexError:
self.breakpoints.bisect(start) = None
(breakpoint, value) = (None, self.breakpoints.values()[-1])
(next_index, next_breakpoint, next_value) = (self.breakpoints.bisect(start), breakpoint, value)
sequence = SortedDict()
while curr_xval < stop:
orig_xval = curr_xval
next_xval = min(stop, curr_xval + step)
next_integral: float = 0
while next_breakpoint and next_xval >= next_breakpoint:
assert next_index is not None
next_integral += transform(next_breakpoint - curr_xval) * curr_value
curr_xval = next_breakpoint
curr_value = next_value
try:
(breakpoint, value) = self.breakpoints.peekitem(next_index + 1)
except IndexError:
next_index + 1 = None
(breakpoint, value) = (None, self.breakpoints.values()[-1])
(next_index, next_breakpoint, next_value) = (next_index + 1, breakpoint, value)
next_integral += transform(next_xval - curr_xval) * curr_value
sequence[orig_xval] = next_integral
curr_xval = next_xval
return sequence
|
clusterman
|
positive
|
def variable_mtrrs(apicid=bits.bsp_apicid()):
assert apicid in bits.cpus()
ia32_mtrrcap_msr = IA32_MTRRCAP(bits.rdmsr(apicid, IA32_MTRRCAP_REG))
ia32_mtrr_def_type_msr = IA32_MTRR_DEF_TYPE(bits.rdmsr(apicid, IA32_MTRR_DEF_TYPE_REG))
with ttypager.page():
print('Summary:')
print('Default memory type: {}'.format(_memory_type_str(ia32_mtrr_def_type_msr.type)))
for i in range(ia32_mtrrcap_msr.VCNT):
ia32_mtrr_physbase_msr = IA32_MTRR_PHYSBASE(bits.rdmsr(apicid, IA32_MTRR_PHYSBASEn_REG(i)))
ia32_mtrr_physmask_msr = IA32_MTRR_PHYSMASK(bits.rdmsr(apicid, IA32_MTRR_PHYSMASKn_REG(i)))
if ia32_mtrr_physmask_msr.V:
print('MTRR{}: type={:20} base={:10} size={:10}'.format(i, _memory_type_str(ia32_mtrr_physbase_msr.Type), _physbase_str(ia32_mtrr_physbase_msr.PhysBase), _physmask_str(ia32_mtrr_physmask_msr.PhysMask)))
print()
print(ia32_mtrrcap_msr, end='\n\n')
print(ia32_mtrr_def_type_msr, end='\n\n')
for i in range(ia32_mtrrcap_msr.VCNT):
<DeepExtract>
_IA32_MTRR_PHYSBASE = [IA32_MTRR_PHYSBASE0_REG, IA32_MTRR_PHYSBASE1_REG, IA32_MTRR_PHYSBASE2_REG, IA32_MTRR_PHYSBASE3_REG, IA32_MTRR_PHYSBASE4_REG, IA32_MTRR_PHYSBASE5_REG, IA32_MTRR_PHYSBASE6_REG, IA32_MTRR_PHYSBASE7_REG, IA32_MTRR_PHYSBASE8_REG, IA32_MTRR_PHYSBASE9_REG]
assert i in range(len(_IA32_MTRR_PHYSBASE))
msr_num = _IA32_MTRR_PHYSBASE[i]
</DeepExtract>
ia32_mtrr_physbase_msr = IA32_MTRR_PHYSBASE(bits.rdmsr(apicid, msr_num))
print('IA32_MTRR_PHYSBASE[{}] MSR {:#x}'.format(i, msr_num))
print(ia32_mtrr_physbase_msr, end='\n\n')
<DeepExtract>
_IA32_MTRR_PHYSMASK = [IA32_MTRR_PHYSMASK0_REG, IA32_MTRR_PHYSMASK1_REG, IA32_MTRR_PHYSMASK2_REG, IA32_MTRR_PHYSMASK3_REG, IA32_MTRR_PHYSMASK4_REG, IA32_MTRR_PHYSMASK5_REG, IA32_MTRR_PHYSMASK6_REG, IA32_MTRR_PHYSMASK7_REG, IA32_MTRR_PHYSMASK8_REG, IA32_MTRR_PHYSMASK9_REG]
assert i in range(len(_IA32_MTRR_PHYSMASK))
msr_num = _IA32_MTRR_PHYSMASK[i]
</DeepExtract>
ia32_mtrr_physmask_msr = IA32_MTRR_PHYSMASK(bits.rdmsr(apicid, msr_num))
print('IA32_MTRR_PHYSMASK[{}] MSR {:#x}'.format(i, msr_num))
print(ia32_mtrr_physmask_msr, end='\n\n')
|
def variable_mtrrs(apicid=bits.bsp_apicid()):
assert apicid in bits.cpus()
ia32_mtrrcap_msr = IA32_MTRRCAP(bits.rdmsr(apicid, IA32_MTRRCAP_REG))
ia32_mtrr_def_type_msr = IA32_MTRR_DEF_TYPE(bits.rdmsr(apicid, IA32_MTRR_DEF_TYPE_REG))
with ttypager.page():
print('Summary:')
print('Default memory type: {}'.format(_memory_type_str(ia32_mtrr_def_type_msr.type)))
for i in range(ia32_mtrrcap_msr.VCNT):
ia32_mtrr_physbase_msr = IA32_MTRR_PHYSBASE(bits.rdmsr(apicid, IA32_MTRR_PHYSBASEn_REG(i)))
ia32_mtrr_physmask_msr = IA32_MTRR_PHYSMASK(bits.rdmsr(apicid, IA32_MTRR_PHYSMASKn_REG(i)))
if ia32_mtrr_physmask_msr.V:
print('MTRR{}: type={:20} base={:10} size={:10}'.format(i, _memory_type_str(ia32_mtrr_physbase_msr.Type), _physbase_str(ia32_mtrr_physbase_msr.PhysBase), _physmask_str(ia32_mtrr_physmask_msr.PhysMask)))
print()
print(ia32_mtrrcap_msr, end='\n\n')
print(ia32_mtrr_def_type_msr, end='\n\n')
for i in range(ia32_mtrrcap_msr.VCNT):
_IA32_MTRR_PHYSBASE = [IA32_MTRR_PHYSBASE0_REG, IA32_MTRR_PHYSBASE1_REG, IA32_MTRR_PHYSBASE2_REG, IA32_MTRR_PHYSBASE3_REG, IA32_MTRR_PHYSBASE4_REG, IA32_MTRR_PHYSBASE5_REG, IA32_MTRR_PHYSBASE6_REG, IA32_MTRR_PHYSBASE7_REG, IA32_MTRR_PHYSBASE8_REG, IA32_MTRR_PHYSBASE9_REG]
assert i in range(len(_IA32_MTRR_PHYSBASE))
msr_num = _IA32_MTRR_PHYSBASE[i]
ia32_mtrr_physbase_msr = IA32_MTRR_PHYSBASE(bits.rdmsr(apicid, msr_num))
print('IA32_MTRR_PHYSBASE[{}] MSR {:#x}'.format(i, msr_num))
print(ia32_mtrr_physbase_msr, end='\n\n')
_IA32_MTRR_PHYSMASK = [IA32_MTRR_PHYSMASK0_REG, IA32_MTRR_PHYSMASK1_REG, IA32_MTRR_PHYSMASK2_REG, IA32_MTRR_PHYSMASK3_REG, IA32_MTRR_PHYSMASK4_REG, IA32_MTRR_PHYSMASK5_REG, IA32_MTRR_PHYSMASK6_REG, IA32_MTRR_PHYSMASK7_REG, IA32_MTRR_PHYSMASK8_REG, IA32_MTRR_PHYSMASK9_REG]
assert i in range(len(_IA32_MTRR_PHYSMASK))
msr_num = _IA32_MTRR_PHYSMASK[i]
ia32_mtrr_physmask_msr = IA32_MTRR_PHYSMASK(bits.rdmsr(apicid, msr_num))
print('IA32_MTRR_PHYSMASK[{}] MSR {:#x}'.format(i, msr_num))
print(ia32_mtrr_physmask_msr, end='\n\n')
|
bits
|
positive
|
def convert_field(name, value, model=None):
if model is None:
<DeepExtract>
model = self.get_queryset().model
</DeepExtract>
<DeepExtract>
if model is None:
model = self.get_model()
target = None
for field in self.get_model_fields(model=model):
if field.name == name:
target = field
field = target
</DeepExtract>
fieldtype = field._property.get_internal_type()
formfield = field._property.formfield()
try:
if fieldtype == 'BooleanField':
return bool(value)
elif fieldtype == 'CharField':
return str(value)
elif fieldtype == 'DateTimeField':
return forms.DateTimeField().clean(value)
else:
return formfield.clean(value)
except (ValueError, TypeError, ValidationError):
return None
|
def convert_field(name, value, model=None):
if model is None:
model = self.get_queryset().model
if model is None:
model = self.get_model()
target = None
for field in self.get_model_fields(model=model):
if field.name == name:
target = field
field = target
fieldtype = field._property.get_internal_type()
formfield = field._property.formfield()
try:
if fieldtype == 'BooleanField':
return bool(value)
elif fieldtype == 'CharField':
return str(value)
elif fieldtype == 'DateTimeField':
return forms.DateTimeField().clean(value)
else:
return formfield.clean(value)
except (ValueError, TypeError, ValidationError):
return None
|
detective.io
|
positive
|
def depart_admonition(self, node):
if self.v2:
<DeepExtract>
self.body.append(self.context.pop())
self.body.append(self.context.pop())
self.body.append(self.context.pop())
</DeepExtract>
else:
<DeepExtract>
self.body.append(self.context.pop())
</DeepExtract>
|
def depart_admonition(self, node):
if self.v2:
self.body.append(self.context.pop())
self.body.append(self.context.pop())
self.body.append(self.context.pop())
else:
self.body.append(self.context.pop())
|
confluencebuilder
|
positive
|
def _add_validator_to_kwargs(kwargs, validator):
<DeepExtract>
if isinstance(kwargs.pop('validator', []), tuple):
existing = list(kwargs.pop('validator', []))
elif not isinstance(kwargs.pop('validator', []), list):
existing = [kwargs.pop('validator', [])]
existing = kwargs.pop('validator', [])
</DeepExtract>
existing.append(validator)
kwargs['validator'] = existing
|
def _add_validator_to_kwargs(kwargs, validator):
if isinstance(kwargs.pop('validator', []), tuple):
existing = list(kwargs.pop('validator', []))
elif not isinstance(kwargs.pop('validator', []), list):
existing = [kwargs.pop('validator', [])]
existing = kwargs.pop('validator', [])
existing.append(validator)
kwargs['validator'] = existing
|
endesive
|
positive
|
def add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
loss_gradients = None
if cfg.FPN.FPN_ON:
FPN.add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
model.CollectAndDistributeFpnRpnProposals()
if model.train:
loss_gradients = FPN.add_fpn_rpn_losses(model)
else:
<DeepExtract>
anchors = generate_anchors(stride=1.0 / spatial_scale_in, sizes=cfg.RPN.SIZES, aspect_ratios=cfg.RPN.ASPECT_RATIOS)
num_anchors = anchors.shape[0]
dim_out = dim_in
model.Conv(blob_in, 'conv_rpn', dim_in, dim_out, kernel=3, pad=1, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
model.Relu('conv_rpn', 'conv_rpn')
model.Conv('conv_rpn', 'rpn_cls_logits', dim_in, num_anchors, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
model.Conv('conv_rpn', 'rpn_bbox_pred', dim_in, 4 * num_anchors, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
if not model.train or cfg.MODEL.FASTER_RCNN:
model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
model.GenerateProposals(['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'], ['rpn_rois', 'rpn_roi_probs'], anchors=anchors, spatial_scale=spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
if model.train:
model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
else:
model.net.Alias('rpn_rois', 'rois')
</DeepExtract>
if model.train:
<DeepExtract>
model.net.SpatialNarrowAs(['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32')
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(['rpn_bbox_' + key + '_wide', 'rpn_bbox_pred'], 'rpn_bbox_' + key)
loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(['rpn_cls_logits', 'rpn_labels_int32'], 'loss_rpn_cls', scale=model.GetLossScale())
loss_rpn_bbox = model.net.SmoothL1Loss(['rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights', 'rpn_bbox_outside_weights'], 'loss_rpn_bbox', beta=1.0 / 9.0, scale=model.GetLossScale())
loss_gradients = blob_utils.get_loss_gradients(model, [loss_rpn_cls, loss_rpn_bbox])
model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
loss_gradients = loss_gradients
</DeepExtract>
return loss_gradients
|
def add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
loss_gradients = None
if cfg.FPN.FPN_ON:
FPN.add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
model.CollectAndDistributeFpnRpnProposals()
if model.train:
loss_gradients = FPN.add_fpn_rpn_losses(model)
else:
anchors = generate_anchors(stride=1.0 / spatial_scale_in, sizes=cfg.RPN.SIZES, aspect_ratios=cfg.RPN.ASPECT_RATIOS)
num_anchors = anchors.shape[0]
dim_out = dim_in
model.Conv(blob_in, 'conv_rpn', dim_in, dim_out, kernel=3, pad=1, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
model.Relu('conv_rpn', 'conv_rpn')
model.Conv('conv_rpn', 'rpn_cls_logits', dim_in, num_anchors, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
model.Conv('conv_rpn', 'rpn_bbox_pred', dim_in, 4 * num_anchors, kernel=1, pad=0, stride=1, weight_init=gauss_fill(0.01), bias_init=const_fill(0.0))
if not model.train or cfg.MODEL.FASTER_RCNN:
model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
model.GenerateProposals(['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'], ['rpn_rois', 'rpn_roi_probs'], anchors=anchors, spatial_scale=spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
if model.train:
model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
else:
model.net.Alias('rpn_rois', 'rois')
if model.train:
model.net.SpatialNarrowAs(['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32')
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(['rpn_bbox_' + key + '_wide', 'rpn_bbox_pred'], 'rpn_bbox_' + key)
loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(['rpn_cls_logits', 'rpn_labels_int32'], 'loss_rpn_cls', scale=model.GetLossScale())
loss_rpn_bbox = model.net.SmoothL1Loss(['rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights', 'rpn_bbox_outside_weights'], 'loss_rpn_bbox', beta=1.0 / 9.0, scale=model.GetLossScale())
loss_gradients = blob_utils.get_loss_gradients(model, [loss_rpn_cls, loss_rpn_bbox])
model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
loss_gradients = loss_gradients
return loss_gradients
|
Detectron-Cascade-RCNN
|
positive
|
def _load_test_suite(self, **kwargs):
name = kwargs['name']
included = kwargs['included']
excluded = kwargs.get('excluded')
base_dir = kwargs.get('base_dir')
<DeepExtract>
if not excluded:
excluded_contexts = set()
if not isinstance(excluded, list):
raise LoaderException('Expected test_discovery_symbols to be a list.')
all_test_context_list = set()
for symbol in excluded:
(path_or_glob, cls_name, method, injected_args) = self._parse_discovery_symbol(symbol, base_dir)
self.logger.debug('Parsed symbol into {} - {} - {} - {}'.format(path_or_glob, cls_name, method, injected_args))
path_or_glob = os.path.abspath(path_or_glob)
test_files = []
if os.path.isfile(path_or_glob):
test_files = [path_or_glob]
else:
test_files = self._find_test_files(path_or_glob)
self._add_top_level_dirs_to_sys_path(test_files)
for test_file in test_files:
directory = os.path.dirname(test_file)
module_name = os.path.basename(test_file)
test_context_list_for_file = self.discover(directory, module_name, cls_name, method, injected_args=injected_args)
all_test_context_list.update(test_context_list_for_file)
if len(test_context_list_for_file) == 0:
self.logger.warn("Didn't find any tests in %s " % test_file)
excluded_contexts = all_test_context_list
</DeepExtract>
<DeepExtract>
if not included:
included_contexts = set()
if not isinstance(included, list):
raise LoaderException('Expected test_discovery_symbols to be a list.')
all_test_context_list = set()
for symbol in included:
(path_or_glob, cls_name, method, injected_args) = self._parse_discovery_symbol(symbol, base_dir)
self.logger.debug('Parsed symbol into {} - {} - {} - {}'.format(path_or_glob, cls_name, method, injected_args))
path_or_glob = os.path.abspath(path_or_glob)
test_files = []
if os.path.isfile(path_or_glob):
test_files = [path_or_glob]
else:
test_files = self._find_test_files(path_or_glob)
self._add_top_level_dirs_to_sys_path(test_files)
for test_file in test_files:
directory = os.path.dirname(test_file)
module_name = os.path.basename(test_file)
test_context_list_for_file = self.discover(directory, module_name, cls_name, method, injected_args=injected_args)
all_test_context_list.update(test_context_list_for_file)
if len(test_context_list_for_file) == 0:
self.logger.warn("Didn't find any tests in %s " % test_file)
included_contexts = all_test_context_list
</DeepExtract>
self.logger.debug('Including tests: ' + str(included_contexts))
self.logger.debug('Excluding tests: ' + str(excluded_contexts))
<DeepExtract>
excluded_test_ids = set(map(lambda ctx: ctx.test_id, excluded_contexts))
all_test_context_list = set(filter(lambda ctx: ctx.test_id not in excluded_test_ids, included_contexts))
</DeepExtract>
if not all_test_context_list:
raise LoaderException('No tests found in ' + name)
return all_test_context_list
|
def _load_test_suite(self, **kwargs):
name = kwargs['name']
included = kwargs['included']
excluded = kwargs.get('excluded')
base_dir = kwargs.get('base_dir')
if not excluded:
excluded_contexts = set()
if not isinstance(excluded, list):
raise LoaderException('Expected test_discovery_symbols to be a list.')
all_test_context_list = set()
for symbol in excluded:
(path_or_glob, cls_name, method, injected_args) = self._parse_discovery_symbol(symbol, base_dir)
self.logger.debug('Parsed symbol into {} - {} - {} - {}'.format(path_or_glob, cls_name, method, injected_args))
path_or_glob = os.path.abspath(path_or_glob)
test_files = []
if os.path.isfile(path_or_glob):
test_files = [path_or_glob]
else:
test_files = self._find_test_files(path_or_glob)
self._add_top_level_dirs_to_sys_path(test_files)
for test_file in test_files:
directory = os.path.dirname(test_file)
module_name = os.path.basename(test_file)
test_context_list_for_file = self.discover(directory, module_name, cls_name, method, injected_args=injected_args)
all_test_context_list.update(test_context_list_for_file)
if len(test_context_list_for_file) == 0:
self.logger.warn("Didn't find any tests in %s " % test_file)
excluded_contexts = all_test_context_list
if not included:
included_contexts = set()
if not isinstance(included, list):
raise LoaderException('Expected test_discovery_symbols to be a list.')
all_test_context_list = set()
for symbol in included:
(path_or_glob, cls_name, method, injected_args) = self._parse_discovery_symbol(symbol, base_dir)
self.logger.debug('Parsed symbol into {} - {} - {} - {}'.format(path_or_glob, cls_name, method, injected_args))
path_or_glob = os.path.abspath(path_or_glob)
test_files = []
if os.path.isfile(path_or_glob):
test_files = [path_or_glob]
else:
test_files = self._find_test_files(path_or_glob)
self._add_top_level_dirs_to_sys_path(test_files)
for test_file in test_files:
directory = os.path.dirname(test_file)
module_name = os.path.basename(test_file)
test_context_list_for_file = self.discover(directory, module_name, cls_name, method, injected_args=injected_args)
all_test_context_list.update(test_context_list_for_file)
if len(test_context_list_for_file) == 0:
self.logger.warn("Didn't find any tests in %s " % test_file)
included_contexts = all_test_context_list
self.logger.debug('Including tests: ' + str(included_contexts))
self.logger.debug('Excluding tests: ' + str(excluded_contexts))
excluded_test_ids = set(map(lambda ctx: ctx.test_id, excluded_contexts))
all_test_context_list = set(filter(lambda ctx: ctx.test_id not in excluded_test_ids, included_contexts))
if not all_test_context_list:
raise LoaderException('No tests found in ' + name)
return all_test_context_list
|
ducktape
|
positive
|
def transform_image(self, image, out_h, out_w, padding_factor=1.0, crop_factor=1.0, theta=None):
<DeepExtract>
out_size = torch.Size((1, 3, out_h, out_w))
if theta is None:
theta = self.THETA_IDENTITY
theta = theta.expand(1, 2, 3).contiguous()
sampling_grid = F.affine_grid(theta, out_size)
elif theta.shape[1] == 2:
sampling_grid = F.affine_grid(theta, out_size)
else:
sampling_grid = self.gridGen(theta)
</DeepExtract>
sampling_grid.data = sampling_grid.data * padding_factor * crop_factor
if version.parse(torch.__version__) >= version.parse('1.3'):
warped_image_batch = F.grid_sample(image, sampling_grid, align_corners=True)
else:
warped_image_batch = F.grid_sample(image, sampling_grid)
return warped_image_batch
|
def transform_image(self, image, out_h, out_w, padding_factor=1.0, crop_factor=1.0, theta=None):
out_size = torch.Size((1, 3, out_h, out_w))
if theta is None:
theta = self.THETA_IDENTITY
theta = theta.expand(1, 2, 3).contiguous()
sampling_grid = F.affine_grid(theta, out_size)
elif theta.shape[1] == 2:
sampling_grid = F.affine_grid(theta, out_size)
else:
sampling_grid = self.gridGen(theta)
sampling_grid.data = sampling_grid.data * padding_factor * crop_factor
if version.parse(torch.__version__) >= version.parse('1.3'):
warped_image_batch = F.grid_sample(image, sampling_grid, align_corners=True)
else:
warped_image_batch = F.grid_sample(image, sampling_grid)
return warped_image_batch
|
DenseMatching
|
positive
|
def depict_workspace_object(self, w, row, column, o, maxImportance, description_structures):
if maxImportance != 0.0 and o.relativeImportance == maxImportance:
attr = curses.A_BOLD
else:
attr = curses.A_NORMAL
w.addstr(row, column, str(o), attr)
column += len(str(o))
if o.descriptions:
w.addstr(row, column, ' (', curses.A_NORMAL)
column += 2
for (i, d) in enumerate(o.descriptions):
if i != 0:
w.addstr(row, column, ', ', curses.A_NORMAL)
column += 2
<DeepExtract>
if d.descriptor.activation == 100:
(s, attr) = (d.descriptor.name.upper(), curses.A_STANDOUT)
if d.descriptor.activation > 50:
(s, attr) = (d.descriptor.name.upper(), curses.A_BOLD)
else:
(s, attr) = (d.descriptor.name.lower(), curses.A_NORMAL)
</DeepExtract>
if d not in description_structures:
s = '[%s]' % s
w.addstr(row, column, s, attr)
column += len(s)
w.addstr(row, column, ')', curses.A_NORMAL)
column += 1
return column
|
def depict_workspace_object(self, w, row, column, o, maxImportance, description_structures):
if maxImportance != 0.0 and o.relativeImportance == maxImportance:
attr = curses.A_BOLD
else:
attr = curses.A_NORMAL
w.addstr(row, column, str(o), attr)
column += len(str(o))
if o.descriptions:
w.addstr(row, column, ' (', curses.A_NORMAL)
column += 2
for (i, d) in enumerate(o.descriptions):
if i != 0:
w.addstr(row, column, ', ', curses.A_NORMAL)
column += 2
if d.descriptor.activation == 100:
(s, attr) = (d.descriptor.name.upper(), curses.A_STANDOUT)
if d.descriptor.activation > 50:
(s, attr) = (d.descriptor.name.upper(), curses.A_BOLD)
else:
(s, attr) = (d.descriptor.name.lower(), curses.A_NORMAL)
if d not in description_structures:
s = '[%s]' % s
w.addstr(row, column, s, attr)
column += len(s)
w.addstr(row, column, ')', curses.A_NORMAL)
column += 1
return column
|
copycat
|
positive
|
def is_sibling_of(self, node):
"""
:returns: ``True`` if the node is a sibling of another node given as an
argument, else, returns ``False``
"""
aux = self.depth == node.depth
if aux and self.depth > 1:
<DeepExtract>
if self.depth - 1:
parentpath = self.depth - 1[0:depth * self.path.steplen]
parentpath = ''
</DeepExtract>
return aux and node.path.startswith(parentpath)
return aux
|
def is_sibling_of(self, node):
"""
:returns: ``True`` if the node is a sibling of another node given as an
argument, else, returns ``False``
"""
aux = self.depth == node.depth
if aux and self.depth > 1:
if self.depth - 1:
parentpath = self.depth - 1[0:depth * self.path.steplen]
parentpath = ''
return aux and node.path.startswith(parentpath)
return aux
|
django-treebeard
|
positive
|
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
"""
Returns all points, confindences and transcriptions of a file in lists. Valid line formats:
xmin,ymin,xmax,ymax,[confidence],[transcription]
x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
"""
pointsList = []
transcriptionsList = []
confidencesList = []
lines = content.split('\r\n' if CRLF else '\n')
for line in lines:
line = line.replace('\r', '').replace('\n', '')
if line != '':
<DeepExtract>
confidence = 0.0
transcription = ''
points = []
numPoints = 4
if LTRB:
numPoints = 4
if withTranscription and withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
if m == None:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription')
elif withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence')
elif withTranscription:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription')
else:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,?\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax')
xmin = int(m.group(1))
ymin = int(m.group(2))
xmax = int(m.group(3))
ymax = int(m.group(4))
if xmax < xmin:
raise Exception('Xmax value (%s) not valid (Xmax < Xmin).' % xmax)
if ymax < ymin:
raise Exception('Ymax value (%s) not valid (Ymax < Ymin).' % ymax)
points = [float(m.group(i)) for i in range(1, numPoints + 1)]
if imWidth > 0 and imHeight > 0:
validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)
else:
numPoints = 8
if withTranscription and withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription')
elif withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence')
elif withTranscription:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription')
else:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4')
points = [float(m.group(i)) for i in range(1, numPoints + 1)]
validate_clockwise_points(points)
if imWidth > 0 and imHeight > 0:
validate_point_inside_bounds(points[0], points[1], imWidth, imHeight)
validate_point_inside_bounds(points[2], points[3], imWidth, imHeight)
validate_point_inside_bounds(points[4], points[5], imWidth, imHeight)
validate_point_inside_bounds(points[6], points[7], imWidth, imHeight)
if withConfidence:
try:
confidence = float(m.group(numPoints + 1))
except ValueError:
raise Exception('Confidence value must be a float')
if withTranscription:
posTranscription = numPoints + (2 if withConfidence else 1)
transcription = m.group(posTranscription)
m2 = re.match('^\\s*\\"(.*)\\"\\s*$', transcription)
if m2 != None:
transcription = m2.group(1).replace('\\\\', '\\').replace('\\"', '"')
(points, confidence, transcription) = (points, confidence, transcription)
</DeepExtract>
pointsList.append(points)
transcriptionsList.append(transcription)
confidencesList.append(confidence)
if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
import numpy as np
sorted_ind = np.argsort(-np.array(confidencesList))
confidencesList = [confidencesList[i] for i in sorted_ind]
pointsList = [pointsList[i] for i in sorted_ind]
transcriptionsList = [transcriptionsList[i] for i in sorted_ind]
return (pointsList, confidencesList, transcriptionsList)
|
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
"""
Returns all points, confindences and transcriptions of a file in lists. Valid line formats:
xmin,ymin,xmax,ymax,[confidence],[transcription]
x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
"""
pointsList = []
transcriptionsList = []
confidencesList = []
lines = content.split('\r\n' if CRLF else '\n')
for line in lines:
line = line.replace('\r', '').replace('\n', '')
if line != '':
confidence = 0.0
transcription = ''
points = []
numPoints = 4
if LTRB:
numPoints = 4
if withTranscription and withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
if m == None:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription')
elif withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence')
elif withTranscription:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription')
else:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,?\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax')
xmin = int(m.group(1))
ymin = int(m.group(2))
xmax = int(m.group(3))
ymax = int(m.group(4))
if xmax < xmin:
raise Exception('Xmax value (%s) not valid (Xmax < Xmin).' % xmax)
if ymax < ymin:
raise Exception('Ymax value (%s) not valid (Ymax < Ymin).' % ymax)
points = [float(m.group(i)) for i in range(1, numPoints + 1)]
if imWidth > 0 and imHeight > 0:
validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)
else:
numPoints = 8
if withTranscription and withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription')
elif withConfidence:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*([0-1].?[0-9]*)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence')
elif withTranscription:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,(.*)$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription')
else:
m = re.match('^\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*,\\s*(-?[0-9]+)\\s*$', line)
if m == None:
raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4')
points = [float(m.group(i)) for i in range(1, numPoints + 1)]
validate_clockwise_points(points)
if imWidth > 0 and imHeight > 0:
validate_point_inside_bounds(points[0], points[1], imWidth, imHeight)
validate_point_inside_bounds(points[2], points[3], imWidth, imHeight)
validate_point_inside_bounds(points[4], points[5], imWidth, imHeight)
validate_point_inside_bounds(points[6], points[7], imWidth, imHeight)
if withConfidence:
try:
confidence = float(m.group(numPoints + 1))
except ValueError:
raise Exception('Confidence value must be a float')
if withTranscription:
posTranscription = numPoints + (2 if withConfidence else 1)
transcription = m.group(posTranscription)
m2 = re.match('^\\s*\\"(.*)\\"\\s*$', transcription)
if m2 != None:
transcription = m2.group(1).replace('\\\\', '\\').replace('\\"', '"')
(points, confidence, transcription) = (points, confidence, transcription)
pointsList.append(points)
transcriptionsList.append(transcription)
confidencesList.append(confidence)
if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
import numpy as np
sorted_ind = np.argsort(-np.array(confidencesList))
confidencesList = [confidencesList[i] for i in sorted_ind]
pointsList = [pointsList[i] for i in sorted_ind]
transcriptionsList = [transcriptionsList[i] for i in sorted_ind]
return (pointsList, confidencesList, transcriptionsList)
|
AdelaiDet
|
positive
|
def run(self):
<DeepExtract>
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
</DeepExtract>
<DeepExtract>
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
</DeepExtract>
<DeepExtract>
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
</DeepExtract>
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
<DeepExtract>
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
</DeepExtract>
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
def run(self):
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, 'setup.py')
versioneer_py = os.path.join(root, 'versioneer.py')
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = "Versioneer was unable to run the project root directory. Versioneer requires setup.py to be executed from its immediate directory (like 'python setup.py COMMAND'), or in a way that lets it use sys.argv[0] to find the root (like 'python path/to/setup.py COMMAND')."
raise VersioneerBadRootError(err)
try:
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print('Warning: build in %s is using versioneer.py from %s' % (os.path.dirname(me), versioneer_py))
except NameError:
pass
root = root
setup_cfg = os.path.join(root, 'setup.cfg')
parser = configparser.SafeConfigParser()
with open(setup_cfg, 'r') as f:
parser.readfp(f)
VCS = parser.get('versioneer', 'VCS')
def get(parser, name):
if parser.has_option('versioneer', name):
cfg = parser.get('versioneer', name)
cfg = None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, 'style') or ''
cfg.versionfile_source = get(parser, 'versionfile_source')
cfg.versionfile_build = get(parser, 'versionfile_build')
cfg.tag_prefix = get(parser, 'tag_prefix')
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ''
cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
cfg.verbose = get(parser, 'verbose')
cfg = cfg
if 'versioneer' in sys.modules:
del sys.modules['versioneer']
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, 'please set [versioneer]VCS= in setup.cfg'
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, 'please set versioneer.versionfile_source'
assert cfg.tag_prefix is not None, 'please set versioneer.tag_prefix'
versionfile_abs = os.path.join(root, cfg.versionfile_source)
get_keywords_f = handlers.get('get_keywords')
from_keywords_f = handlers.get('keywords')
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print('got version from expanded keyword %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print('got version from file %s %s' % (versionfile_abs, ver))
versions = ver
except NotThisMethod:
pass
from_vcs_f = handlers.get('pieces_from_vcs')
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print('got version from VCS %s' % ver)
versions = ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print('got version from parentdir %s' % ver)
versions = ver
except NotThisMethod:
pass
if verbose:
print('unable to compute version')
versions = {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None}
target_versionfile = cfg.versionfile_source
print('UPDATING %s' % target_versionfile)
os.unlink(target_versionfile)
contents = json.dumps(versions, sort_keys=True, indent=1, separators=(',', ': '))
with open(target_versionfile, 'w') as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (target_versionfile, versions['version']))
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, 'w') as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source})
|
eliot
|
positive
|
def filter_tile_type(self, tile_type_id):
"""Return True if the dataset should be included, False otherwise."""
<DeepExtract>
try:
tile_types = self.datacube.tile_types
except AttributeError:
tile_types = None
if tile_types:
try:
tt_set = set(json.loads(tile_types))
except (TypeError, ValueError):
try:
tt_set = set([int(tile_types)])
except ValueError:
raise AssertionError("Unable to parse the 'tile_types' " + 'configuration file item.')
else:
tt_set = None
tt_set = tt_set
</DeepExtract>
return tt_set is None or tile_type_id in tt_set
|
def filter_tile_type(self, tile_type_id):
"""Return True if the dataset should be included, False otherwise."""
try:
tile_types = self.datacube.tile_types
except AttributeError:
tile_types = None
if tile_types:
try:
tt_set = set(json.loads(tile_types))
except (TypeError, ValueError):
try:
tt_set = set([int(tile_types)])
except ValueError:
raise AssertionError("Unable to parse the 'tile_types' " + 'configuration file item.')
else:
tt_set = None
tt_set = tt_set
return tt_set is None or tile_type_id in tt_set
|
agdc
|
positive
|
def find_outliers(points, outlier_threshold=3, normalized=True):
"""
Finds and returns outliers in the 'points'.
Outliers are those items in the 'points' that have a normalized distance
that is at least 'outlier_threshold' away from the the calculated center
of the population defined in 'points'. The normalized distance of a
point is the distance from the point to its cluster's centroid divided by
the mean distance of all point-to-centroid distances.
Arguments:
points: list of n-dimensional points
An NxM list of lists defining the population to check for outliers
where M is the number of dimensions per point and N is the number
of points.
outlier_threshold: float
Used to define outliers. Outliers are points with normalized
distances greater than this threshold.
normalized: bool
Value indicating whether 'points' have already been normalized or
not. Passing False for this argument will result in the original
'points' being normalized before the outlier search starts.
Returns:
The indexes of the outliers in 'points'.
"""
if not normalized:
<DeepExtract>
if len(points) > 0 and (not is_iterable(points[0])):
std = std_dev(points)
if std == 0:
points = [0] * len(points)
else:
points = divide_by_scalar(points, std_dev(points))
dimensions = zip(*points)
std = [std_dev(d) for d in dimensions]
if 0 in std:
norm_points = []
for point in points:
norm_point = []
for (p, s) in zip(point, std):
if s == 0:
norm_point.append(0.0)
else:
norm_point.append(p / s)
norm_points.append(norm_point)
points = norm_points
else:
points = divide_lists(points, std)
</DeepExtract>
<DeepExtract>
if is_iterable(points[0]):
dimension = len(points[0])
else:
dimension = 1
i_dimension = -1
for i in range(len(points)):
if is_iterable(points[i]):
i_dimension = len(points[i])
else:
i_dimension = 1
if i_dimension != dimension:
raise ValueError('Points must have the same number of dimensions.')
d = dimension
</DeepExtract>
if is_nested(points):
dimensions = zip(*points)
midpoint_index = (len(points) - 1) / 2
if d == 1 and (not is_nested(points)):
centroids = [sorted(points)[midpoint_index]]
else:
centroids = [[sorted(dim)[midpoint_index] for dim in dimensions]]
<DeepExtract>
if len(points) < 1:
raise ValueError('cluster requires at least one point.')
if len(kmeans(points, centroids)[0]) < 1:
raise ValueError('cluster requires at least one centroid.')
d = get_dimension(points)
n = len(points)
cluster_indexes = range(len(kmeans(points, centroids)[0]))
if d != get_dimension(kmeans(points, centroids)[0]):
raise ValueError('Points and centroids must have the same dimension(found {0} and {1} respectively)'.format(d, get_dimension(kmeans(points, centroids)[0])))
clusters = [0] * n
min_distances = [0] * n
for i in range(n):
distances = [sqr_euclidean_dist(points[i], kmeans(points, centroids)[0][j]) for j in cluster_indexes]
if d == 1 and (not is_nested(kmeans(points, centroids)[0])):
distance_totals = distances
else:
distance_totals = [sum(distances[j]) for j in range(len(distances))]
clusters[i] = index_of_min(distance_totals)
min_distances[i] = distance_totals[clusters[i]]
(_, distances) = (clusters, [math.sqrt(dist) for dist in min_distances])
</DeepExtract>
mean_distance = sum(distances) / float(len(distances))
return [i for (i, distance) in enumerate(distances) if mean_distance > 0 and distance / mean_distance >= outlier_threshold]
|
def find_outliers(points, outlier_threshold=3, normalized=True):
"""
Finds and returns outliers in the 'points'.
Outliers are those items in the 'points' that have a normalized distance
that is at least 'outlier_threshold' away from the the calculated center
of the population defined in 'points'. The normalized distance of a
point is the distance from the point to its cluster's centroid divided by
the mean distance of all point-to-centroid distances.
Arguments:
points: list of n-dimensional points
An NxM list of lists defining the population to check for outliers
where M is the number of dimensions per point and N is the number
of points.
outlier_threshold: float
Used to define outliers. Outliers are points with normalized
distances greater than this threshold.
normalized: bool
Value indicating whether 'points' have already been normalized or
not. Passing False for this argument will result in the original
'points' being normalized before the outlier search starts.
Returns:
The indexes of the outliers in 'points'.
"""
if not normalized:
if len(points) > 0 and (not is_iterable(points[0])):
std = std_dev(points)
if std == 0:
points = [0] * len(points)
else:
points = divide_by_scalar(points, std_dev(points))
dimensions = zip(*points)
std = [std_dev(d) for d in dimensions]
if 0 in std:
norm_points = []
for point in points:
norm_point = []
for (p, s) in zip(point, std):
if s == 0:
norm_point.append(0.0)
else:
norm_point.append(p / s)
norm_points.append(norm_point)
points = norm_points
else:
points = divide_lists(points, std)
if is_iterable(points[0]):
dimension = len(points[0])
else:
dimension = 1
i_dimension = -1
for i in range(len(points)):
if is_iterable(points[i]):
i_dimension = len(points[i])
else:
i_dimension = 1
if i_dimension != dimension:
raise ValueError('Points must have the same number of dimensions.')
d = dimension
if is_nested(points):
dimensions = zip(*points)
midpoint_index = (len(points) - 1) / 2
if d == 1 and (not is_nested(points)):
centroids = [sorted(points)[midpoint_index]]
else:
centroids = [[sorted(dim)[midpoint_index] for dim in dimensions]]
if len(points) < 1:
raise ValueError('cluster requires at least one point.')
if len(kmeans(points, centroids)[0]) < 1:
raise ValueError('cluster requires at least one centroid.')
d = get_dimension(points)
n = len(points)
cluster_indexes = range(len(kmeans(points, centroids)[0]))
if d != get_dimension(kmeans(points, centroids)[0]):
raise ValueError('Points and centroids must have the same dimension(found {0} and {1} respectively)'.format(d, get_dimension(kmeans(points, centroids)[0])))
clusters = [0] * n
min_distances = [0] * n
for i in range(n):
distances = [sqr_euclidean_dist(points[i], kmeans(points, centroids)[0][j]) for j in cluster_indexes]
if d == 1 and (not is_nested(kmeans(points, centroids)[0])):
distance_totals = distances
else:
distance_totals = [sum(distances[j]) for j in range(len(distances))]
clusters[i] = index_of_min(distance_totals)
min_distances[i] = distance_totals[clusters[i]]
(_, distances) = (clusters, [math.sqrt(dist) for dist in min_distances])
mean_distance = sum(distances) / float(len(distances))
return [i for (i, distance) in enumerate(distances) if mean_distance > 0 and distance / mean_distance >= outlier_threshold]
|
avocado
|
positive
|
def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):
"""
Here, input is toknized further by WordPiece (WP) tokenizer and fed into BERT.
INPUT
:param model_bert:
:param tokenizer: WordPiece toknizer
:param nlu: Question
:param nlu_t: CoreNLP tokenized nlu.
:param hds: Headers
:param hs_t: None or 1st-level tokenized headers
:param max_seq_length: max input token length
OUTPUT
tokens: BERT input tokens
nlu_tt: WP-tokenized input natural language questions
orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token
tok_to_orig_index: inverse map.
"""
l_n = []
l_hs = []
input_ids = []
tokens = []
segment_ids = []
input_mask = []
i_nlu = []
i_hds = []
doc_tokens = []
nlu_tt = []
t_to_tt_idx = []
tt_to_t_idx = []
t_to_tt_idx_hds = []
for (b, nlu_t1) in enumerate(nlu_t):
hds1 = hds[b]
l_hs.append(len(hds1))
tt_to_t_idx1 = []
t_to_tt_idx1 = []
nlu_tt1 = []
for (i, token) in enumerate(nlu_t1):
t_to_tt_idx1.append(len(nlu_tt1))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tt_to_t_idx1.append(i)
nlu_tt1.append(sub_token)
nlu_tt.append(nlu_tt1)
tt_to_t_idx.append(tt_to_t_idx1)
t_to_tt_idx.append(t_to_tt_idx1)
l_n.append(len(nlu_tt1))
<DeepExtract>
tokens = []
segment_ids = []
t_to_tt_idx_hds1 = []
tokens.append('[CLS]')
i_st_nlu = len(tokens)
segment_ids.append(0)
for token in nlu_tt1:
tokens.append(token)
segment_ids.append(0)
i_ed_nlu = len(tokens)
tokens.append('[SEP]')
segment_ids.append(0)
i_hds = []
for (i, hds11) in enumerate(hds1):
i_st_hd = len(tokens)
t_to_tt_idx_hds11 = []
sub_tok = []
for sub_tok1 in hds11.split():
t_to_tt_idx_hds11.append(len(sub_tok))
sub_tok += tokenizer.tokenize(sub_tok1)
t_to_tt_idx_hds1.append(t_to_tt_idx_hds11)
tokens += sub_tok
i_ed_hd = len(tokens)
i_hds.append((i_st_hd, i_ed_hd))
segment_ids += [1] * len(sub_tok)
if i < len(hds1) - 1:
tokens.append('[SEP]')
segment_ids.append(0)
elif i == len(hds1) - 1:
tokens.append('[SEP]')
segment_ids.append(1)
else:
raise EnvironmentError
i_nlu = (i_st_nlu, i_ed_nlu)
(tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1) = (tokens, segment_ids, i_nlu, i_hds, t_to_tt_idx_hds1)
</DeepExtract>
assert len(t_to_tt_idx_hds1) == len(hds1)
t_to_tt_idx_hds.append(t_to_tt_idx_hds1)
input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
input_mask1 = [1] * len(input_ids1)
if len(nlu_t) == 1:
max_seq_length = len(input_ids1)
while len(input_ids1) < max_seq_length:
input_ids1.append(0)
input_mask1.append(0)
segment_ids1.append(0)
assert len(input_ids1) == max_seq_length
assert len(input_mask1) == max_seq_length
assert len(segment_ids1) == max_seq_length
input_ids.append(input_ids1)
tokens.append(tokens1)
segment_ids.append(segment_ids1)
input_mask.append(input_mask1)
i_nlu.append(i_nlu1)
i_hds.append(i_hds1)
all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
(all_encoder_layer, pooled_output) = model_bert(all_input_ids, all_segment_ids, all_input_mask)
<DeepExtract>
l_hpu = []
for i_hds1 in i_hds:
for i_hds11 in i_hds1:
l_hpu.append(i_hds11[1] - i_hds11[0])
l_hpu = l_hpu
</DeepExtract>
assert len(set(l_n)) == 1 and len(set(i_nlu)) == 1
assert l_n[0] == i_nlu[0][1] - i_nlu[0][0]
return (all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds)
|
def get_bert_output(model_bert, tokenizer, nlu_t, hds, max_seq_length):
"""
Here, input is toknized further by WordPiece (WP) tokenizer and fed into BERT.
INPUT
:param model_bert:
:param tokenizer: WordPiece toknizer
:param nlu: Question
:param nlu_t: CoreNLP tokenized nlu.
:param hds: Headers
:param hs_t: None or 1st-level tokenized headers
:param max_seq_length: max input token length
OUTPUT
tokens: BERT input tokens
nlu_tt: WP-tokenized input natural language questions
orig_to_tok_index: map the index of 1st-level-token to the index of 2nd-level-token
tok_to_orig_index: inverse map.
"""
l_n = []
l_hs = []
input_ids = []
tokens = []
segment_ids = []
input_mask = []
i_nlu = []
i_hds = []
doc_tokens = []
nlu_tt = []
t_to_tt_idx = []
tt_to_t_idx = []
t_to_tt_idx_hds = []
for (b, nlu_t1) in enumerate(nlu_t):
hds1 = hds[b]
l_hs.append(len(hds1))
tt_to_t_idx1 = []
t_to_tt_idx1 = []
nlu_tt1 = []
for (i, token) in enumerate(nlu_t1):
t_to_tt_idx1.append(len(nlu_tt1))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tt_to_t_idx1.append(i)
nlu_tt1.append(sub_token)
nlu_tt.append(nlu_tt1)
tt_to_t_idx.append(tt_to_t_idx1)
t_to_tt_idx.append(t_to_tt_idx1)
l_n.append(len(nlu_tt1))
tokens = []
segment_ids = []
t_to_tt_idx_hds1 = []
tokens.append('[CLS]')
i_st_nlu = len(tokens)
segment_ids.append(0)
for token in nlu_tt1:
tokens.append(token)
segment_ids.append(0)
i_ed_nlu = len(tokens)
tokens.append('[SEP]')
segment_ids.append(0)
i_hds = []
for (i, hds11) in enumerate(hds1):
i_st_hd = len(tokens)
t_to_tt_idx_hds11 = []
sub_tok = []
for sub_tok1 in hds11.split():
t_to_tt_idx_hds11.append(len(sub_tok))
sub_tok += tokenizer.tokenize(sub_tok1)
t_to_tt_idx_hds1.append(t_to_tt_idx_hds11)
tokens += sub_tok
i_ed_hd = len(tokens)
i_hds.append((i_st_hd, i_ed_hd))
segment_ids += [1] * len(sub_tok)
if i < len(hds1) - 1:
tokens.append('[SEP]')
segment_ids.append(0)
elif i == len(hds1) - 1:
tokens.append('[SEP]')
segment_ids.append(1)
else:
raise EnvironmentError
i_nlu = (i_st_nlu, i_ed_nlu)
(tokens1, segment_ids1, i_nlu1, i_hds1, t_to_tt_idx_hds1) = (tokens, segment_ids, i_nlu, i_hds, t_to_tt_idx_hds1)
assert len(t_to_tt_idx_hds1) == len(hds1)
t_to_tt_idx_hds.append(t_to_tt_idx_hds1)
input_ids1 = tokenizer.convert_tokens_to_ids(tokens1)
input_mask1 = [1] * len(input_ids1)
if len(nlu_t) == 1:
max_seq_length = len(input_ids1)
while len(input_ids1) < max_seq_length:
input_ids1.append(0)
input_mask1.append(0)
segment_ids1.append(0)
assert len(input_ids1) == max_seq_length
assert len(input_mask1) == max_seq_length
assert len(segment_ids1) == max_seq_length
input_ids.append(input_ids1)
tokens.append(tokens1)
segment_ids.append(segment_ids1)
input_mask.append(input_mask1)
i_nlu.append(i_nlu1)
i_hds.append(i_hds1)
all_input_ids = torch.tensor(input_ids, dtype=torch.long).to(device)
all_input_mask = torch.tensor(input_mask, dtype=torch.long).to(device)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long).to(device)
(all_encoder_layer, pooled_output) = model_bert(all_input_ids, all_segment_ids, all_input_mask)
l_hpu = []
for i_hds1 in i_hds:
for i_hds11 in i_hds1:
l_hpu.append(i_hds11[1] - i_hds11[0])
l_hpu = l_hpu
assert len(set(l_n)) == 1 and len(set(i_nlu)) == 1
assert l_n[0] == i_nlu[0][1] - i_nlu[0][0]
return (all_encoder_layer, pooled_output, tokens, i_nlu, i_hds, l_n, l_hpu, l_hs, nlu_tt, t_to_tt_idx, tt_to_t_idx, t_to_tt_idx_hds)
|
editsql
|
positive
|
def _add_osx_binary_parser(self):
if platform.system() == 'Darwin':
<DeepExtract>
project_config = os.path.join(self.paths.build, '.cproject')
cproject_config = project_config
</DeepExtract>
tree = ElementTree.parse(cproject_config)
if not tree.find(".//storageModule[@moduleId='org.eclipse.cdt.core.settings'][@name='Configuration']/*extension[@id='org.eclipse.cdt.core.MachO64']"):
extensions_node = tree.find(".//storageModule[@moduleId='org.eclipse.cdt.core.settings'][@name='Configuration']//extensions")
extensions_node.clear()
binary_parser_node = ElementTree.fromstring(OSX_BINARY_PARSER)
extensions_node.append(binary_parser_node)
tree.write(cproject_config)
|
def _add_osx_binary_parser(self):
if platform.system() == 'Darwin':
project_config = os.path.join(self.paths.build, '.cproject')
cproject_config = project_config
tree = ElementTree.parse(cproject_config)
if not tree.find(".//storageModule[@moduleId='org.eclipse.cdt.core.settings'][@name='Configuration']/*extension[@id='org.eclipse.cdt.core.MachO64']"):
extensions_node = tree.find(".//storageModule[@moduleId='org.eclipse.cdt.core.settings'][@name='Configuration']//extensions")
extensions_node.clear()
binary_parser_node = ElementTree.fromstring(OSX_BINARY_PARSER)
extensions_node.append(binary_parser_node)
tree.write(cproject_config)
|
client
|
positive
|
def _close(self):
"""internal close port helper"""
if self._port_handle is not None:
win32.SetCommTimeouts(self._port_handle, self._orgTimeouts)
if self._overlapped_read is not None:
<DeepExtract>
self._cancel_overlapped_io(self._overlapped_read)
</DeepExtract>
win32.CloseHandle(self._overlapped_read.hEvent)
self._overlapped_read = None
if self._overlapped_write is not None:
<DeepExtract>
self._cancel_overlapped_io(self._overlapped_write)
</DeepExtract>
win32.CloseHandle(self._overlapped_write.hEvent)
self._overlapped_write = None
win32.CloseHandle(self._port_handle)
self._port_handle = None
|
def _close(self):
"""internal close port helper"""
if self._port_handle is not None:
win32.SetCommTimeouts(self._port_handle, self._orgTimeouts)
if self._overlapped_read is not None:
self._cancel_overlapped_io(self._overlapped_read)
win32.CloseHandle(self._overlapped_read.hEvent)
self._overlapped_read = None
if self._overlapped_write is not None:
self._cancel_overlapped_io(self._overlapped_write)
win32.CloseHandle(self._overlapped_write.hEvent)
self._overlapped_write = None
win32.CloseHandle(self._port_handle)
self._port_handle = None
|
bitio
|
positive
|