Columns:
  before: string, lengths 0 to 955k
  after: string, lengths 0 to 877k
  repo: string, lengths 1 to 74
  type: string, 1 distinct value
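Each row below pairs a "before" snippet, whose inlined regions are wrapped in <DeepExtract> markers, with an "after" snippet where the markers have been stripped, plus the source repo and a type label. A minimal sketch of iterating such a table with the Hugging Face datasets library follows; the dataset path "user/deepextract-pairs" is a placeholder, not the real identifier.

```python
# Illustrative only: the dataset path is a placeholder, not the real identifier.
from datasets import load_dataset

ds = load_dataset("user/deepextract-pairs", split="train")  # hypothetical path

for row in ds.select(range(3)):
    print(row["repo"], row["type"])
    print(row["before"][:200])  # snippet containing <DeepExtract> markers
    print(row["after"][:200])   # same snippet with the markers removed
```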
def assertHasMessage(testCase, logger, messageType, fields=None): """ Assert that the given logger has a message of the given type, and the first message found of this type has the given fields. This can be used as the assertion function passed to L{validateLogging} or as part of a unit test. @param testCase: L{unittest.TestCase} instance. @param logger: L{eliot.MemoryLogger} whose messages will be checked. @param messageType: L{eliot.MessageType} indicating which message we're looking for. @param fields: The first message of the given type found must have a superset of the given C{dict} as its fields. If C{None} then fields are not checked. @return: The first found L{LoggedMessage} of the given type, if field validation succeeded. @raises AssertionError: No message was found, or the fields were not superset of given fields. """ if fields is None: fields = {} messages = LoggedMessage.ofType(logger.messages, messageType) testCase.assertTrue(messages, 'No messages of type %s' % (messageType,)) loggedMessage = messages[0] <DeepExtract> messageSubset = dict([(key, value) for (key, value) in loggedMessage.message.items() if key in fields]) testCase.assertEqual(messageSubset, fields) </DeepExtract> return loggedMessage
def assertHasMessage(testCase, logger, messageType, fields=None): """ Assert that the given logger has a message of the given type, and the first message found of this type has the given fields. This can be used as the assertion function passed to L{validateLogging} or as part of a unit test. @param testCase: L{unittest.TestCase} instance. @param logger: L{eliot.MemoryLogger} whose messages will be checked. @param messageType: L{eliot.MessageType} indicating which message we're looking for. @param fields: The first message of the given type found must have a superset of the given C{dict} as its fields. If C{None} then fields are not checked. @return: The first found L{LoggedMessage} of the given type, if field validation succeeded. @raises AssertionError: No message was found, or the fields were not superset of given fields. """ if fields is None: fields = {} messages = LoggedMessage.ofType(logger.messages, messageType) testCase.assertTrue(messages, 'No messages of type %s' % (messageType,)) loggedMessage = messages[0] messageSubset = dict([(key, value) for (key, value) in loggedMessage.message.items() if key in fields]) testCase.assertEqual(messageSubset, fields) return loggedMessage
eliot
positive
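The assertHasMessage helper in the row above comes from eliot's testing utilities. A hedged sketch of how it might be used in a unit test follows; the message type and its field are invented for illustration, and the decorator signature should be checked against the installed eliot version.

```python
# Sketch only: MY_MESSAGE and its "count" field are made up for illustration.
from unittest import TestCase

from eliot import Field, MessageType
from eliot.testing import assertHasMessage, validateLogging

MY_MESSAGE = MessageType(
    "myapp:example",
    [Field.forTypes("count", [int], "An example integer field.")],
    "An example message type.",
)


class ExampleTests(TestCase):
    @validateLogging(assertHasMessage, MY_MESSAGE, {"count": 3})
    def test_logs_message(self, logger):
        # Normally the code under test would emit this message.
        MY_MESSAGE(count=3).write(logger)
```

Here assertHasMessage is passed as the assertion function to validateLogging, exactly as its docstring describes, so it runs against the MemoryLogger after the test body finishes.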
def test_collect_vpcs() -> None: do_client = ClientMock({'list_regions': regions, 'list_vpcs': vpcs}) <DeepExtract> cloud = Cloud(id='do') team = DigitalOceanTeam(id='test_team', urn='do:team:test_team') plugin_instance = DigitalOceanTeamCollector(team, do_client) plugin_instance.collect() cloud_graph = Graph(root=cloud) graph = Graph(root=GraphRoot(id='root', tags={})) cloud_graph.merge(plugin_instance.graph) graph.merge(cloud_graph) sanitize(graph) graph = graph </DeepExtract> <DeepExtract> for (node_from, node_to, edge) in graph.edges: if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:region:fra1') and (node_to.urn == 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)): return assert False, f"Edge {'do:region:fra1'} -> {'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959'} not found" </DeepExtract> vpc = graph.search_first('urn', 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959') assert vpc.urn == 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959' assert vpc.name == 'default-fra1' assert vpc.description == '' assert vpc.ip_range == '127.0.0.1/20' assert vpc.is_default is True
def test_collect_vpcs() -> None: do_client = ClientMock({'list_regions': regions, 'list_vpcs': vpcs}) cloud = Cloud(id='do') team = DigitalOceanTeam(id='test_team', urn='do:team:test_team') plugin_instance = DigitalOceanTeamCollector(team, do_client) plugin_instance.collect() cloud_graph = Graph(root=cloud) graph = Graph(root=GraphRoot(id='root', tags={})) cloud_graph.merge(plugin_instance.graph) graph.merge(cloud_graph) sanitize(graph) graph = graph for (node_from, node_to, edge) in graph.edges: if hasattr(node_from, 'urn') and hasattr(node_to, 'urn') and (node_from.urn == 'do:region:fra1') and (node_to.urn == 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959') and (edge.edge_type == (EdgeType.delete if delete else EdgeType.default)): return assert False, f"Edge {'do:region:fra1'} -> {'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959'} not found" vpc = graph.search_first('urn', 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959') assert vpc.urn == 'do:vpc:0d3176ad-41e0-4021-b831-0c5c45c60959' assert vpc.name == 'default-fra1' assert vpc.description == '' assert vpc.ip_range == '127.0.0.1/20' assert vpc.is_default is True
cloudkeeper
positive
@staticmethod def gate_reset(gate: Reset, ctx: _NumbaBackendContext) -> _NumbaBackendContext: """Implementation of reset operation.""" if ctx.save_cxt_cache: ctx.cache = ctx.qubits.copy() ctx.save_cxt_cache = False qubits = ctx.qubits n_qubits = ctx.n_qubits for target in gate.target_iter(n_qubits): rand = random.random() <DeepExtract> p0 = 0.0 lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): val = qubits[_shifted(lower_mask, i)] p0 += val.real * val.real + val.imag * val.imag p0 = p0 </DeepExtract> if rand < p0: <DeepExtract> sqrtp_inv = 1.0 / math.sqrt(p0) lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): i0 = _shifted(lower_mask, i) qubits[i0] *= sqrtp_inv qubits[i0 + (1 << target)] = 0.0 </DeepExtract> else: <DeepExtract> sqrtp_inv = 1.0 / math.sqrt(1.0 - p0) lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): i0 = _shifted(lower_mask, i) qubits[i0] = qubits[i0 + (1 << target)] * sqrtp_inv qubits[i0 + (1 << target)] = 0.0 </DeepExtract> return ctx
@staticmethod def gate_reset(gate: Reset, ctx: _NumbaBackendContext) -> _NumbaBackendContext: """Implementation of reset operation.""" if ctx.save_cxt_cache: ctx.cache = ctx.qubits.copy() ctx.save_cxt_cache = False qubits = ctx.qubits n_qubits = ctx.n_qubits for target in gate.target_iter(n_qubits): rand = random.random() p0 = 0.0 lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): val = qubits[_shifted(lower_mask, i)] p0 += val.real * val.real + val.imag * val.imag p0 = p0 if rand < p0: sqrtp_inv = 1.0 / math.sqrt(p0) lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): i0 = _shifted(lower_mask, i) qubits[i0] *= sqrtp_inv qubits[i0 + (1 << target)] = 0.0 else: sqrtp_inv = 1.0 / math.sqrt(1.0 - p0) lower_mask = (1 << _QSMask(target)) - 1 for i in prange(1 << _QSMask(n_qubits) - 1): i0 = _shifted(lower_mask, i) qubits[i0] = qubits[i0 + (1 << target)] * sqrtp_inv qubits[i0 + (1 << target)] = 0.0 return ctx
Blueqat
positive
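The gate_reset implementation above first computes p0, the probability of measuring the target qubit as 0 (the summed squared magnitudes of the amplitudes whose target bit is 0), then projects onto the sampled outcome and rescales by 1/sqrt(p). A self-contained NumPy sketch of that projection step, independent of Blueqat's internals, is shown below.

```python
# Self-contained sketch of reset-by-projection on a dense state vector,
# mirroring the p0 / renormalization logic in gate_reset above.
import numpy as np

def reset_qubit(state: np.ndarray, target: int, rng=np.random) -> np.ndarray:
    """Measure `target`, force it to |0>, and renormalize the state."""
    idx = np.arange(state.size)
    bit = (idx >> target) & 1
    p0 = float(np.sum(np.abs(state[bit == 0]) ** 2))
    if rng.random() < p0:
        # Outcome 0: zero out the |1> amplitudes and rescale by 1/sqrt(p0).
        out = np.where(bit == 0, state, 0.0) / np.sqrt(p0)
    else:
        # Outcome 1: move each |1> amplitude into its |0> partner slot, rescale.
        out = np.zeros_like(state)
        zeros = idx[bit == 0]
        out[zeros] = state[zeros | (1 << target)] / np.sqrt(1.0 - p0)
    return out
```

Calling it with a normalized two-qubit state such as np.array([0.5, 0.5, 0.5, 0.5], dtype=complex) and target=0 returns a state whose amplitude mass for the target qubit sits entirely in the |0> slots.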
def addSpecial(self, label, idx=None): <DeepExtract> label = label.lower() if self.lower else label if label in self.labelToIdx: idx = self.labelToIdx[label] else: idx = len(self.idxToLabel) self.idxToLabel[idx] = label self.labelToIdx[label] = idx idx = idx </DeepExtract> self.special += [idx]
def addSpecial(self, label, idx=None): label = label.lower() if self.lower else label if label in self.labelToIdx: idx = self.labelToIdx[label] else: idx = len(self.idxToLabel) self.idxToLabel[idx] = label self.labelToIdx[label] = idx idx = idx self.special += [idx]
dtr-prototype
positive
def fit(self, X, y=None, sample_weight=None): """Fit the model with X. Parameters ---------- X : Triangle-like Set of LDFs to which the munich adjustment will be applied. y : Ignored sample_weight : Ignored Returns ------- self : object Returns the instance itself. """ from chainladder import options if self.paid_to_incurred is None: raise ValueError('Must enter valid value for paid_to_incurred.') if X.array_backend == 'sparse': obj = X.set_backend('numpy') else: obj = X.copy() xp = obj.get_array_module() self.xp = xp missing = xp.nan_to_num(obj.values) * obj.nan_triangle == 0 self.rho_ = obj[obj.origin == obj.origin.min()] if len(xp.where(missing)[0]) > 0: if self.fillna: from chainladder.methods import Chainladder filler = Chainladder().fit(obj).full_expectation_ filler = filler[filler.valuation <= obj.valuation_date].values obj.values = xp.where(missing, filler, obj.values) else: raise ValueError('MunichAdjustment cannot be performed when P/I or I/P ' + 'ratios cannot be computed. Use `fillna=True` to impute zero' + ' values of the triangle with simple chainladder expectation.') if 'ldf_' not in obj: obj = Development().fit_transform(obj) <DeepExtract> if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.get_array_module() paid = obj[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj[item] incurred = obj[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_X_ = xp.concatenate((paid, incurred), axis=0) </DeepExtract> <DeepExtract> if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.ldf_.get_array_module() paid = obj.ldf_[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj.ldf_[item] incurred = obj.ldf_[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj.ldf_[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_ldf_ = xp.concatenate((paid, incurred), axis=0) </DeepExtract> <DeepExtract> if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.sigma_.get_array_module() paid = obj.sigma_[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj.sigma_[item] incurred = obj.sigma_[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj.sigma_[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_sigma_ = xp.concatenate((paid, incurred), axis=0) </DeepExtract> <DeepExtract> xp = obj.get_array_module() (p, i) = (self.p_to_i_X_[0], self.p_to_i_X_[1]) modelsP = WeightedRegression(axis=2, thru_orig=True, xp=xp) modelsP = modelsP.fit(p, i, 1 / p).sigma_fill(obj.sigma_interpolation) modelsI = WeightedRegression(axis=2, thru_orig=True, xp=xp) modelsI = modelsI.fit(i, p, 1 / i).sigma_fill(obj.sigma_interpolation) q_f = self._p_to_i_concate(modelsP.slope_, modelsI.slope_, xp) rho_sigma = self._p_to_i_concate(modelsP.sigma_, modelsI.sigma_, xp) (self.q_f_, self.rho_sigma_) = (xp.swapaxes(q_f, -1, -2), xp.swapaxes(rho_sigma, -1, -2)) </DeepExtract> <DeepExtract> xp = obj.get_array_module() p_to_i_ata = self._get_p_to_i_object(obj.link_ratio) p_to_i_ldf = self.p_to_i_ldf_ p_to_i_sigma = self.p_to_i_sigma_ (paid, incurred) = 
(self.p_to_i_X_[0], self.p_to_i_X_[1]) p_to_i_ldf = xp.unique(p_to_i_ldf, axis=-2) p_to_i_sigma = xp.unique(p_to_i_sigma, axis=-2) residP = (p_to_i_ata[0] - p_to_i_ldf[0]) / p_to_i_sigma[0] * xp.sqrt(paid[..., :-1, :-1]) residI = (p_to_i_ata[1] - p_to_i_ldf[1]) / p_to_i_sigma[1] * xp.sqrt(incurred[..., :-1, :-1]) nans = (obj - obj[obj.valuation == obj.valuation_date]).values[0, 0] * 0 + 1 q_resid = (paid / incurred - self.q_f_[1]) / self.rho_sigma_[1] * xp.sqrt(incurred) * nans q_inv_resid = (incurred / paid - 1 / self.q_f_[1]) / self.rho_sigma_[0] * xp.sqrt(paid) * nans resid = self._p_to_i_concate(residP, residI, xp) q_resid = self._p_to_i_concate(q_inv_resid, q_resid, xp) (self.residual_, self.q_resid_) = (resid, q_resid) </DeepExtract> <DeepExtract> xp = obj.get_array_module() (k, v, o, d) = self.residual_[1].shape w = xp.reshape(self.residual_[1], (k, v, o * d)) w[w == 0] = xp.nan w = w * 0 + 1 lambdaI = WeightedRegression(thru_orig=True, axis=-1, xp=xp).fit(xp.reshape(self.q_resid_[1][..., :-1, :-1], (k, v, o * d)), xp.reshape(self.residual_[1], (k, v, o * d)), w).slope_ lambdaP = WeightedRegression(thru_orig=True, axis=-1, xp=xp).fit(xp.reshape(self.q_resid_[0][..., :-1, :-1], (k, v, o * d)), xp.reshape(self.residual_[0], (k, v, o * d)), w).slope_ self.lambda_coef_ = self._p_to_i_concate(lambdaP, lambdaI, xp)[..., None] </DeepExtract> <DeepExtract> ldf_tri = self._get_mcl_cdf(obj, self.munich_full_triangle_).values.copy() xp = obj.get_array_module() ldf_tri = xp.concatenate((ldf_tri, xp.ones(ldf_tri.shape)[..., -1:]), -1) ldf_tri = ldf_tri[..., :-1] / ldf_tri[..., 1:] obj = self._get_mcl_cdf(obj, self.munich_full_triangle_).copy() obj.values = ldf_tri obj.ddims = obj.link_ratio.ddims obj.is_pattern = True obj.is_cumulative = False obj._set_slicers self.ldf_ = obj </DeepExtract> self.ldf_.is_cumulative = False self.ldf_.valuation_date = pd.to_datetime(options.ULT_VAL) self._map = {list(X.columns).index(x): (num % 2, num // 2) for (num, x) in enumerate(np.array(self.paid_to_incurred).flatten())} <DeepExtract> xp = self.xp if hasattr(self, 'xp') else self.ldf_.get_array_module() map = self._map self.rho_.values = xp.concatenate([getattr(self, 'rho_sigma_')[map[k][0], :, map[k][1]:map[k][1] + 1, ...] for k in range(len(map))], axis=1) </DeepExtract> return self
def fit(self, X, y=None, sample_weight=None): """Fit the model with X. Parameters ---------- X : Triangle-like Set of LDFs to which the munich adjustment will be applied. y : Ignored sample_weight : Ignored Returns ------- self : object Returns the instance itself. """ from chainladder import options if self.paid_to_incurred is None: raise ValueError('Must enter valid value for paid_to_incurred.') if X.array_backend == 'sparse': obj = X.set_backend('numpy') else: obj = X.copy() xp = obj.get_array_module() self.xp = xp missing = xp.nan_to_num(obj.values) * obj.nan_triangle == 0 self.rho_ = obj[obj.origin == obj.origin.min()] if len(xp.where(missing)[0]) > 0: if self.fillna: from chainladder.methods import Chainladder filler = Chainladder().fit(obj).full_expectation_ filler = filler[filler.valuation <= obj.valuation_date].values obj.values = xp.where(missing, filler, obj.values) else: raise ValueError('MunichAdjustment cannot be performed when P/I or I/P ' + 'ratios cannot be computed. Use `fillna=True` to impute zero' + ' values of the triangle with simple chainladder expectation.') if 'ldf_' not in obj: obj = Development().fit_transform(obj) if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.get_array_module() paid = obj[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj[item] incurred = obj[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_X_ = xp.concatenate((paid, incurred), axis=0) if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.ldf_.get_array_module() paid = obj.ldf_[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj.ldf_[item] incurred = obj.ldf_[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj.ldf_[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_ldf_ = xp.concatenate((paid, incurred), axis=0) if type(self.paid_to_incurred) is tuple: p_to_i = [self.paid_to_incurred] else: p_to_i = self.paid_to_incurred xp = obj.sigma_.get_array_module() paid = obj.sigma_[[item[0] for item in p_to_i][0]] for item in [item[0] for item in p_to_i][1:]: paid[item] = obj.sigma_[item] incurred = obj.sigma_[[item[1] for item in p_to_i][0]] for item in [item[1] for item in p_to_i][1:]: incurred[item] = obj.sigma_[item] paid = paid.values[None] incurred = incurred.values[None] self.p_to_i_sigma_ = xp.concatenate((paid, incurred), axis=0) xp = obj.get_array_module() (p, i) = (self.p_to_i_X_[0], self.p_to_i_X_[1]) modelsP = WeightedRegression(axis=2, thru_orig=True, xp=xp) modelsP = modelsP.fit(p, i, 1 / p).sigma_fill(obj.sigma_interpolation) modelsI = WeightedRegression(axis=2, thru_orig=True, xp=xp) modelsI = modelsI.fit(i, p, 1 / i).sigma_fill(obj.sigma_interpolation) q_f = self._p_to_i_concate(modelsP.slope_, modelsI.slope_, xp) rho_sigma = self._p_to_i_concate(modelsP.sigma_, modelsI.sigma_, xp) (self.q_f_, self.rho_sigma_) = (xp.swapaxes(q_f, -1, -2), xp.swapaxes(rho_sigma, -1, -2)) xp = obj.get_array_module() p_to_i_ata = self._get_p_to_i_object(obj.link_ratio) p_to_i_ldf = self.p_to_i_ldf_ p_to_i_sigma = self.p_to_i_sigma_ (paid, incurred) = (self.p_to_i_X_[0], self.p_to_i_X_[1]) p_to_i_ldf = xp.unique(p_to_i_ldf, axis=-2) p_to_i_sigma = xp.unique(p_to_i_sigma, axis=-2) residP 
= (p_to_i_ata[0] - p_to_i_ldf[0]) / p_to_i_sigma[0] * xp.sqrt(paid[..., :-1, :-1]) residI = (p_to_i_ata[1] - p_to_i_ldf[1]) / p_to_i_sigma[1] * xp.sqrt(incurred[..., :-1, :-1]) nans = (obj - obj[obj.valuation == obj.valuation_date]).values[0, 0] * 0 + 1 q_resid = (paid / incurred - self.q_f_[1]) / self.rho_sigma_[1] * xp.sqrt(incurred) * nans q_inv_resid = (incurred / paid - 1 / self.q_f_[1]) / self.rho_sigma_[0] * xp.sqrt(paid) * nans resid = self._p_to_i_concate(residP, residI, xp) q_resid = self._p_to_i_concate(q_inv_resid, q_resid, xp) (self.residual_, self.q_resid_) = (resid, q_resid) xp = obj.get_array_module() (k, v, o, d) = self.residual_[1].shape w = xp.reshape(self.residual_[1], (k, v, o * d)) w[w == 0] = xp.nan w = w * 0 + 1 lambdaI = WeightedRegression(thru_orig=True, axis=-1, xp=xp).fit(xp.reshape(self.q_resid_[1][..., :-1, :-1], (k, v, o * d)), xp.reshape(self.residual_[1], (k, v, o * d)), w).slope_ lambdaP = WeightedRegression(thru_orig=True, axis=-1, xp=xp).fit(xp.reshape(self.q_resid_[0][..., :-1, :-1], (k, v, o * d)), xp.reshape(self.residual_[0], (k, v, o * d)), w).slope_ self.lambda_coef_ = self._p_to_i_concate(lambdaP, lambdaI, xp)[..., None] ldf_tri = self._get_mcl_cdf(obj, self.munich_full_triangle_).values.copy() xp = obj.get_array_module() ldf_tri = xp.concatenate((ldf_tri, xp.ones(ldf_tri.shape)[..., -1:]), -1) ldf_tri = ldf_tri[..., :-1] / ldf_tri[..., 1:] obj = self._get_mcl_cdf(obj, self.munich_full_triangle_).copy() obj.values = ldf_tri obj.ddims = obj.link_ratio.ddims obj.is_pattern = True obj.is_cumulative = False obj._set_slicers self.ldf_ = obj self.ldf_.is_cumulative = False self.ldf_.valuation_date = pd.to_datetime(options.ULT_VAL) self._map = {list(X.columns).index(x): (num % 2, num // 2) for (num, x) in enumerate(np.array(self.paid_to_incurred).flatten())} xp = self.xp if hasattr(self, 'xp') else self.ldf_.get_array_module() map = self._map self.rho_.values = xp.concatenate([getattr(self, 'rho_sigma_')[map[k][0], :, map[k][1]:map[k][1] + 1, ...] for k in range(len(map))], axis=1) return self
chainladder-python
positive
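The MunichAdjustment.fit method above ties paid and incurred triangles together through the Munich chain ladder residual regressions. A hedged usage sketch against chainladder's public API follows; it assumes the bundled 'mcl' sample triangle with 'paid' and 'incurred' columns, which should be verified against the installed version.

```python
# Sketch only: assumes the 'mcl' sample and its 'paid'/'incurred' columns exist
# in the installed chainladder version.
import chainladder as cl

triangle = cl.load_sample("mcl")
munich = cl.MunichAdjustment(paid_to_incurred=("paid", "incurred")).fit(triangle)

print(munich.ldf_)          # adjusted development patterns set in fit() above
print(munich.lambda_coef_)  # Munich lambda coefficients fitted above
```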
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None): """ greedily select boxes with high confidence and overlap with current maximum <= thresh rule out overlap >= thresh, overlap = oks :param kpts_db :param thresh: retain overlap < thresh :return: indexes to keep """ if len(kpts_db) == 0: return [] scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))]) kpts = np.array([kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))]) areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))]) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) <DeepExtract> if not isinstance(sigmas, np.ndarray): sigmas = np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89]) / 10.0 vars = (sigmas * 2) ** 2 xg = kpts[i][0::3] yg = kpts[i][1::3] vg = kpts[i][2::3] ious = np.zeros(kpts[order[1:]].shape[0]) for n_d in range(0, kpts[order[1:]].shape[0]): xd = kpts[order[1:]][n_d, 0::3] yd = kpts[order[1:]][n_d, 1::3] vd = kpts[order[1:]][n_d, 2::3] dx = xd - xg dy = yd - yg e = (dx ** 2 + dy ** 2) / vars / ((areas[i] + areas[order[1:]][n_d]) / 2 + np.spacing(1)) / 2 if in_vis_thre is not None: ind = list(vg > in_vis_thre) and list(vd > in_vis_thre) e = e[ind] ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0 oks_ovr = ious </DeepExtract> inds = np.where(oks_ovr <= thresh)[0] order = order[inds + 1] return keep
def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None): """ greedily select boxes with high confidence and overlap with current maximum <= thresh rule out overlap >= thresh, overlap = oks :param kpts_db :param thresh: retain overlap < thresh :return: indexes to keep """ if len(kpts_db) == 0: return [] scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))]) kpts = np.array([kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))]) areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))]) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) if not isinstance(sigmas, np.ndarray): sigmas = np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89]) / 10.0 vars = (sigmas * 2) ** 2 xg = kpts[i][0::3] yg = kpts[i][1::3] vg = kpts[i][2::3] ious = np.zeros(kpts[order[1:]].shape[0]) for n_d in range(0, kpts[order[1:]].shape[0]): xd = kpts[order[1:]][n_d, 0::3] yd = kpts[order[1:]][n_d, 1::3] vd = kpts[order[1:]][n_d, 2::3] dx = xd - xg dy = yd - yg e = (dx ** 2 + dy ** 2) / vars / ((areas[i] + areas[order[1:]][n_d]) / 2 + np.spacing(1)) / 2 if in_vis_thre is not None: ind = list(vg > in_vis_thre) and list(vd > in_vis_thre) e = e[ind] ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0 oks_ovr = ious inds = np.where(oks_ovr <= thresh)[0] order = order[inds + 1] return keep
AICity
positive
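oks_nms above is plain NumPy, so with the function in scope it can be exercised directly on a toy detection list. The sketch below builds two near-duplicate 17-keypoint detections plus one distant detection; the coordinates are made up.

```python
# Toy input for oks_nms: three fake 17-keypoint detections.
import numpy as np

def fake_det(offset, score):
    kpts = np.zeros((17, 3))
    kpts[:, 0] = np.arange(17) + offset      # x
    kpts[:, 1] = np.arange(17) * 2 + offset  # y
    kpts[:, 2] = 1.0                         # visibility
    return {"keypoints": kpts, "score": score, "area": 100.0}

kpts_db = [fake_det(0.0, 0.9), fake_det(0.1, 0.8), fake_det(50.0, 0.7)]
keep = oks_nms(kpts_db, thresh=0.5)
print(keep)  # expected: the top-scoring duplicate plus the distant detection
```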
def gen_obs(self): """ Generate the agent's view (partially observable, low-resolution encoding) """ <DeepExtract> (topX, topY, botX, botY) = self.get_view_exts() grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size) for i in range(self.agent_dir + 1): grid = grid.rotate_left() if not self.see_through_walls: vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1)) else: vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool) agent_pos = (grid.width // 2, grid.height - 1) if self.carrying: grid.set(*agent_pos, self.carrying) else: grid.set(*agent_pos, None) (grid, vis_mask) = (grid, vis_mask) </DeepExtract> image = grid.encode(vis_mask) assert hasattr(self, 'mission'), 'environments must define a textual mission string' obs = {'image': image, 'direction': self.agent_dir, 'mission': self.mission} return obs
def gen_obs(self): """ Generate the agent's view (partially observable, low-resolution encoding) """ (topX, topY, botX, botY) = self.get_view_exts() grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size) for i in range(self.agent_dir + 1): grid = grid.rotate_left() if not self.see_through_walls: vis_mask = grid.process_vis(agent_pos=(self.agent_view_size // 2, self.agent_view_size - 1)) else: vis_mask = np.ones(shape=(grid.width, grid.height), dtype=np.bool) agent_pos = (grid.width // 2, grid.height - 1) if self.carrying: grid.set(*agent_pos, self.carrying) else: grid.set(*agent_pos, None) (grid, vis_mask) = (grid, vis_mask) image = grid.encode(vis_mask) assert hasattr(self, 'mission'), 'environments must define a textual mission string' obs = {'image': image, 'direction': self.agent_dir, 'mission': self.mission} return obs
d4rl
positive
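gen_obs above returns a dict with 'image', 'direction', and 'mission', which is what a MiniGrid-style environment hands back on reset and step. A hedged sketch of consuming that observation follows; the environment id is an assumption, and the reset return value shown matches the older gym API that gym-minigrid targets.

```python
# Sketch only: 'MiniGrid-Empty-8x8-v0' is an assumed environment id, and the
# plain-obs reset() return assumes a pre-0.26 gym API.
import gym
import gym_minigrid  # noqa: F401  (registers the MiniGrid environments)

env = gym.make("MiniGrid-Empty-8x8-v0")
obs = env.reset()

print(obs["image"].shape)  # partially observable, low-resolution encoding
print(obs["direction"])    # agent heading, 0..3
print(obs["mission"])      # textual mission string
```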
def virtual_schema_iterative_search(self, list_attributes: [str], list_samples: [str], perf_stats, max_hops=2, debug_enumerate_all_jps=False): st_stage1 = time.time() assert len(list_attributes) == len(list_samples) sch_def = {attr: value for (attr, value) in zip(list_attributes, list_samples)} sch_def = OrderedDict(sorted(sch_def.items(), key=lambda x: x[0], reverse=True)) <DeepExtract> filter_drs = dict() filter_id = 0 for (attr, cell) in sch_def.items(): if cell == '': drs = self.aurum_api.search_exact_attribute(attr, max_results=50) filter_drs[attr, FilterType.ATTR, filter_id] = drs else: drs_attr = self.aurum_api.search_exact_attribute(attr, max_results=50) drs_cell = self.aurum_api.search_content(cell, max_results=500) drs = self.aurum_api.intersection(drs_attr, drs_cell) filter_drs[cell, FilterType.CELL, filter_id] = drs filter_id += 1 filter_drs = filter_drs </DeepExtract> et_stage1 = time.time() perf_stats['t_stage1'] = et_stage1 - st_stage1 st_stage2 = time.time() table_fulfilled_filters = defaultdict(list) table_nid = dict() for (filter, drs) in filter_drs.items(): drs.set_table_mode() for table in drs: if filter[1] == FilterType.ATTR: columns = [c for c in drs.data] for c in columns: if c.source_name == table: table_nid[table] = c.nid if filter[2] not in [id for (_, _, id) in table_fulfilled_filters[table]]: table_fulfilled_filters[table].append(((filter[0], None), FilterType.ATTR, filter[2])) elif filter[1] == FilterType.CELL: columns = [c for c in drs.data] for c in columns: if c.source_name == table: table_nid[table] = c.nid if filter[2] not in [id for (_, _, id) in table_fulfilled_filters[table]]: table_fulfilled_filters[table].append(((filter[0], c.field_name), FilterType.CELL, filter[2])) <DeepExtract> table_path = dict() for (table, nid) in table_nid.items(): path = self.aurum_api.helper.get_path_nid(nid) table_path[table] = path table_path = table_path </DeepExtract> table_fulfilled_filters = OrderedDict(sorted(table_fulfilled_filters.items(), key=lambda el: (len({filter_id for (_, _, filter_id) in el[1]}), el[0]), reverse=True)) for (k, v) in table_fulfilled_filters.items(): v = sorted(v, key=lambda el: (el[2], el[0][0]), reverse=True) table_fulfilled_filters[k] = v def eager_candidate_exploration(): def covers_filters(candidate_filters, all_filters): all_filters_set = set([id for (_, _, id) in filter_drs.keys()]) candidate_filters_set = set([id for (_, _, id) in candidate_filters]) if len(candidate_filters_set) == len(all_filters_set): return True return False def compute_size_filter_ix(filters, candidate_group_filters_covered): new_fs_set = set([id for (_, _, id) in filters]) candidate_fs_set = set([id for (_, _, id) in candidate_group_filters_covered]) ix_size = len(new_fs_set.union(candidate_fs_set)) - len(candidate_fs_set) return ix_size def clear_state(): candidate_group.clear() candidate_group_filters_covered.clear() backup = [] go_on = True while go_on: candidate_group = [] candidate_group_filters_covered = set() for i in range(len(list(table_fulfilled_filters.items()))): (table_pivot, filters_pivot) = list(table_fulfilled_filters.items())[i] candidate_group.append(table_pivot) candidate_group_filters_covered.update(filters_pivot) if covers_filters(candidate_group_filters_covered, filter_drs.items()): candidate_group = sorted(candidate_group) yield (candidate_group, candidate_group_filters_covered) <DeepExtract> candidate_group.clear() candidate_group_filters_covered.clear() </DeepExtract> continue for j in range(len(list(table_fulfilled_filters.items()))): idx 
= i + j + 1 if idx == len(table_fulfilled_filters.items()): break (table, filters) = list(table_fulfilled_filters.items())[idx] <DeepExtract> new_fs_set = set([id for (_, _, id) in filters]) candidate_fs_set = set([id for (_, _, id) in candidate_group_filters_covered]) ix_size = len(new_fs_set.union(candidate_fs_set)) - len(candidate_fs_set) new_filters = ix_size </DeepExtract> if new_filters > 0: candidate_group.append(table) candidate_group_filters_covered.update(filters) if covers_filters(candidate_group_filters_covered, filter_drs.items()): candidate_group = sorted(candidate_group) yield (candidate_group, candidate_group_filters_covered) <DeepExtract> candidate_group.clear() candidate_group_filters_covered.clear() </DeepExtract> candidate_group.append(table_pivot) candidate_group_filters_covered.update(filters_pivot) candidate_group = sorted(candidate_group) if covers_filters(candidate_group_filters_covered, filter_drs.items()): yield (candidate_group, candidate_group_filters_covered) else: backup.append(([el for el in candidate_group], set([el for el in candidate_group_filters_covered]))) <DeepExtract> candidate_group.clear() candidate_group_filters_covered.clear() </DeepExtract> for (candidate_group, candidate_group_filters_covered) in backup: yield (candidate_group, candidate_group_filters_covered) go_on = False et_stage2 = time.time() perf_stats['t_stage2'] = et_stage2 - st_stage2 cache_unjoinable_pairs = defaultdict(int) perf_stats['time_joinable'] = 0 perf_stats['time_is_materializable'] = 0 perf_stats['time_materialize'] = 0 num_candidate_groups = 0 for (candidate_group, candidate_group_filters_covered) in eager_candidate_exploration(): num_candidate_groups += 1 print('') print('Candidate group: ' + str(candidate_group)) num_unique_filters = len({f_id for (_, _, f_id) in candidate_group_filters_covered}) print('Covers #Filters: ' + str(num_unique_filters)) if len(candidate_group) == 1: table = candidate_group[0] path = table_path[table] materialized_virtual_schema = dpu.read_relation(path + '/' + table) attrs_to_project = dpu.obtain_attributes_to_project(candidate_group_filters_covered) view_metadata = dict() view_metadata['#join_graphs'] = 1 view_metadata['join_graph'] = {'nodes': [{'id': -101010, 'label': table}], 'edges': []} if 'single_relation_group' not in perf_stats: perf_stats['single_relation_group'] = 0 perf_stats['single_relation_group'] += 1 yield (materialized_virtual_schema, attrs_to_project, view_metadata) continue max_hops = max_hops st_joinable = time.time() <DeepExtract> assert len(candidate_group) > 1 max_hops = max_hops paths_per_pair = defaultdict(list) table_combinations = [el for el in itertools.combinations(candidate_group, 2)] for (table1, table2) in tqdm(table_combinations): if (table1, table2) in cache_unjoinable_pairs.keys() or (table2, table1) in cache_unjoinable_pairs.keys(): continue t1 = self.aurum_api.make_drs(table1) t2 = self.aurum_api.make_drs(table2) t1.set_table_mode() t2.set_table_mode() paths = self.are_paths_in_cache(table1, table2) if paths is None: print('Finding paths between ' + str(table1) + ' and ' + str(table2)) print('max hops: ' + str(max_hops)) s = time.time() drs = self.aurum_api.paths(t1, t2, Relation.PKFK, max_hops=max_hops, lean_search=True) e = time.time() print('Total time: ' + str(e - s)) paths = drs.paths() self.place_paths_in_cache(table1, table2, paths) if len(paths) == 0: cache_unjoinable_pairs[table1, table2] += 1 cache_unjoinable_pairs[table2, table1] += 1 for p in paths: tables_covered = set() tables_in_group = 
set(candidate_group) for hop in p: if hop.source_name in tables_in_group: tables_covered.add(hop.source_name) paths_per_pair[table1, table2].append((p, tables_covered)) if len(paths_per_pair) == 0: join_graphs = [] all_combinations = [el for el in itertools.product(*list(paths_per_pair.values()))] deduplicated_paths = dict() for path_combination in all_combinations: for (p1, p2) in itertools.combinations(path_combination, 2): (path1, tables_covered1) = p1 (path2, tables_covered2) = p2 if len(tables_covered1) > len(tables_covered2): current_cover_len = len(tables_covered1) else: current_cover_len = len(tables_covered2) potential_cover = tables_covered1.union(tables_covered2) joinable_paths = tables_covered1.intersection(tables_covered2) potential_cover_len = len(potential_cover) if potential_cover_len > current_cover_len and len(joinable_paths) > 0: tx_path1 = self.transform_join_path_to_pair_hop(path1) tx_path2 = self.transform_join_path_to_pair_hop(path2) combined_path = tx_path1 + tx_path2 path_id = self.compute_join_graph_id(combined_path) if path_id not in deduplicated_paths: deduplicated_paths[path_id] = (combined_path, potential_cover) for (p, tables_covered) in list(paths_per_pair.values())[0]: if len(tables_covered) == len(candidate_group): tx_p = self.transform_join_path_to_pair_hop(p) path_id = self.compute_join_graph_id(tx_p) deduplicated_paths[path_id] = (tx_p, tables_covered) covering_join_graphs = [jg[0] for (_, jg) in deduplicated_paths.items() if len(jg[1]) == len(candidate_group)] join_graphs = sorted(covering_join_graphs, key=lambda x: len(x)) join_graphs = join_graphs </DeepExtract> et_joinable = time.time() perf_stats['time_joinable'] += et_joinable - st_joinable if debug_enumerate_all_jps: for (i, group) in enumerate(join_graphs): print('Group: ' + str(i)) for el in group: print(el) continue if len(join_graphs) == 0: if 'unjoinable_candidate_group' not in perf_stats: perf_stats['unjoinable_candidate_group'] = 0 perf_stats['unjoinable_candidate_group'] += 1 print('Group: ' + str(candidate_group) + ' is Non-Joinable with max_hops=' + str(max_hops)) continue if 'joinable_candidate_group' not in perf_stats: perf_stats['joinable_candidate_group'] = 0 perf_stats['joinable_candidate_group'] += 1 if 'num_join_graphs_per_candidate_group' not in perf_stats: perf_stats['num_join_graphs_per_candidate_group'] = [] perf_stats['num_join_graphs_per_candidate_group'].append(len(join_graphs)) total_materializable_join_graphs = 0 materializable_join_graphs = [] for jpg in join_graphs: filters = set() for (l, r) in jpg: if l.source_name in table_fulfilled_filters: filters.update(table_fulfilled_filters[l.source_name]) if r.source_name in table_fulfilled_filters: filters.update(table_fulfilled_filters[r.source_name]) st_is_materializable = time.time() if sum([0] + [1 for el in list_samples if el != '']) > 0: <DeepExtract> local_intermediates = dict() for (l, r) in jpg: if l.source_name not in local_intermediates: l_path = self.aurum_api.helper.get_path_nid(l.nid) if l.source_name in table_fulfilled_filters: filters_l = table_fulfilled_filters[l.source_name] filtered_l = None for (info, filter_type, filter_id) in filters_l: if filter_type == FilterType.ATTR: filtered_l = dpu.read_relation_on_copy(l_path + l.source_name) continue attribute = info[1] cell_value = info[0] filtered_l = dpu.apply_filter(l_path + l.source_name, attribute, cell_value) if len(filtered_l) == 0: is_join_graph_valid = False else: filtered_l = dpu.read_relation_on_copy(l_path + l.source_name) else: filtered_l = 
local_intermediates[l.source_name] local_intermediates[l.source_name] = filtered_l if r.source_name not in local_intermediates: r_path = self.aurum_api.helper.get_path_nid(r.nid) if r.source_name in table_fulfilled_filters: filters_r = table_fulfilled_filters[r.source_name] filtered_r = None for (info, filter_type, filter_id) in filters_r: if filter_type == FilterType.ATTR: filtered_r = dpu.read_relation_on_copy(r_path + r.source_name) continue attribute = info[1] cell_value = info[0] filtered_r = dpu.apply_filter(r_path + r.source_name, attribute, cell_value) if len(filtered_r) == 0: is_join_graph_valid = False else: filtered_r = dpu.read_relation_on_copy(r_path + r.source_name) else: filtered_r = local_intermediates[r.source_name] local_intermediates[r.source_name] = filtered_r joined = dpu.join_ab_on_key(filtered_l, filtered_r, l.field_name, r.field_name, suffix_str='_x') if len(joined) == 0: is_join_graph_valid = False is_join_graph_valid = True </DeepExtract> else: is_join_graph_valid = True et_is_materializable = time.time() perf_stats['time_is_materializable'] += et_is_materializable - st_is_materializable if is_join_graph_valid: total_materializable_join_graphs += 1 materializable_join_graphs.append((jpg, filters)) st_materialize = time.time() <DeepExtract> to_return = [] for (mjg, filters) in materializable_join_graphs: attrs_to_project = dpu.obtain_attributes_to_project(filters) materialized_virtual_schema = dpu.materialize_join_graph_sample(mjg, self, sample_size=1000) if materialized_virtual_schema is False: continue view_metadata = dict() view_metadata['#join_graphs'] = len(materializable_join_graphs) view_metadata['join_graph'] = self.format_join_graph_into_nodes_edges(mjg) to_return.append((materialized_virtual_schema, attrs_to_project, view_metadata)) to_return = to_return </DeepExtract> et_materialize = time.time() perf_stats['time_materialize'] += et_materialize - st_materialize for el in to_return: if 'actually_materialized' not in perf_stats: perf_stats['actually_materialized'] = 0 perf_stats['actually_materialized'] += 1 yield el if 'materializable_join_graphs' not in perf_stats: perf_stats['materializable_join_graphs'] = [] perf_stats['materializable_join_graphs'].append(total_materializable_join_graphs) perf_stats['num_candidate_groups'] = num_candidate_groups print('Finished enumerating groups') cache_unjoinable_pairs = OrderedDict(sorted(cache_unjoinable_pairs.items(), key=lambda x: x[1], reverse=True)) for (k, v) in cache_unjoinable_pairs.items(): print(str(k) + ' => ' + str(v))
def virtual_schema_iterative_search(self, list_attributes: [str], list_samples: [str], perf_stats, max_hops=2, debug_enumerate_all_jps=False): st_stage1 = time.time() assert len(list_attributes) == len(list_samples) sch_def = {attr: value for (attr, value) in zip(list_attributes, list_samples)} sch_def = OrderedDict(sorted(sch_def.items(), key=lambda x: x[0], reverse=True)) filter_drs = dict() filter_id = 0 for (attr, cell) in sch_def.items(): if cell == '': drs = self.aurum_api.search_exact_attribute(attr, max_results=50) filter_drs[attr, FilterType.ATTR, filter_id] = drs else: drs_attr = self.aurum_api.search_exact_attribute(attr, max_results=50) drs_cell = self.aurum_api.search_content(cell, max_results=500) drs = self.aurum_api.intersection(drs_attr, drs_cell) filter_drs[cell, FilterType.CELL, filter_id] = drs filter_id += 1 filter_drs = filter_drs et_stage1 = time.time() perf_stats['t_stage1'] = et_stage1 - st_stage1 st_stage2 = time.time() table_fulfilled_filters = defaultdict(list) table_nid = dict() for (filter, drs) in filter_drs.items(): drs.set_table_mode() for table in drs: if filter[1] == FilterType.ATTR: columns = [c for c in drs.data] for c in columns: if c.source_name == table: table_nid[table] = c.nid if filter[2] not in [id for (_, _, id) in table_fulfilled_filters[table]]: table_fulfilled_filters[table].append(((filter[0], None), FilterType.ATTR, filter[2])) elif filter[1] == FilterType.CELL: columns = [c for c in drs.data] for c in columns: if c.source_name == table: table_nid[table] = c.nid if filter[2] not in [id for (_, _, id) in table_fulfilled_filters[table]]: table_fulfilled_filters[table].append(((filter[0], c.field_name), FilterType.CELL, filter[2])) table_path = dict() for (table, nid) in table_nid.items(): path = self.aurum_api.helper.get_path_nid(nid) table_path[table] = path table_path = table_path table_fulfilled_filters = OrderedDict(sorted(table_fulfilled_filters.items(), key=lambda el: (len({filter_id for (_, _, filter_id) in el[1]}), el[0]), reverse=True)) for (k, v) in table_fulfilled_filters.items(): v = sorted(v, key=lambda el: (el[2], el[0][0]), reverse=True) table_fulfilled_filters[k] = v def eager_candidate_exploration(): def covers_filters(candidate_filters, all_filters): all_filters_set = set([id for (_, _, id) in filter_drs.keys()]) candidate_filters_set = set([id for (_, _, id) in candidate_filters]) if len(candidate_filters_set) == len(all_filters_set): return True return False def compute_size_filter_ix(filters, candidate_group_filters_covered): new_fs_set = set([id for (_, _, id) in filters]) candidate_fs_set = set([id for (_, _, id) in candidate_group_filters_covered]) ix_size = len(new_fs_set.union(candidate_fs_set)) - len(candidate_fs_set) return ix_size def clear_state(): candidate_group.clear() candidate_group_filters_covered.clear() backup = [] go_on = True while go_on: candidate_group = [] candidate_group_filters_covered = set() for i in range(len(list(table_fulfilled_filters.items()))): (table_pivot, filters_pivot) = list(table_fulfilled_filters.items())[i] candidate_group.append(table_pivot) candidate_group_filters_covered.update(filters_pivot) if covers_filters(candidate_group_filters_covered, filter_drs.items()): candidate_group = sorted(candidate_group) yield (candidate_group, candidate_group_filters_covered) candidate_group.clear() candidate_group_filters_covered.clear() continue for j in range(len(list(table_fulfilled_filters.items()))): idx = i + j + 1 if idx == len(table_fulfilled_filters.items()): break (table, filters) = 
list(table_fulfilled_filters.items())[idx] new_fs_set = set([id for (_, _, id) in filters]) candidate_fs_set = set([id for (_, _, id) in candidate_group_filters_covered]) ix_size = len(new_fs_set.union(candidate_fs_set)) - len(candidate_fs_set) new_filters = ix_size if new_filters > 0: candidate_group.append(table) candidate_group_filters_covered.update(filters) if covers_filters(candidate_group_filters_covered, filter_drs.items()): candidate_group = sorted(candidate_group) yield (candidate_group, candidate_group_filters_covered) candidate_group.clear() candidate_group_filters_covered.clear() candidate_group.append(table_pivot) candidate_group_filters_covered.update(filters_pivot) candidate_group = sorted(candidate_group) if covers_filters(candidate_group_filters_covered, filter_drs.items()): yield (candidate_group, candidate_group_filters_covered) else: backup.append(([el for el in candidate_group], set([el for el in candidate_group_filters_covered]))) candidate_group.clear() candidate_group_filters_covered.clear() for (candidate_group, candidate_group_filters_covered) in backup: yield (candidate_group, candidate_group_filters_covered) go_on = False et_stage2 = time.time() perf_stats['t_stage2'] = et_stage2 - st_stage2 cache_unjoinable_pairs = defaultdict(int) perf_stats['time_joinable'] = 0 perf_stats['time_is_materializable'] = 0 perf_stats['time_materialize'] = 0 num_candidate_groups = 0 for (candidate_group, candidate_group_filters_covered) in eager_candidate_exploration(): num_candidate_groups += 1 print('') print('Candidate group: ' + str(candidate_group)) num_unique_filters = len({f_id for (_, _, f_id) in candidate_group_filters_covered}) print('Covers #Filters: ' + str(num_unique_filters)) if len(candidate_group) == 1: table = candidate_group[0] path = table_path[table] materialized_virtual_schema = dpu.read_relation(path + '/' + table) attrs_to_project = dpu.obtain_attributes_to_project(candidate_group_filters_covered) view_metadata = dict() view_metadata['#join_graphs'] = 1 view_metadata['join_graph'] = {'nodes': [{'id': -101010, 'label': table}], 'edges': []} if 'single_relation_group' not in perf_stats: perf_stats['single_relation_group'] = 0 perf_stats['single_relation_group'] += 1 yield (materialized_virtual_schema, attrs_to_project, view_metadata) continue max_hops = max_hops st_joinable = time.time() assert len(candidate_group) > 1 max_hops = max_hops paths_per_pair = defaultdict(list) table_combinations = [el for el in itertools.combinations(candidate_group, 2)] for (table1, table2) in tqdm(table_combinations): if (table1, table2) in cache_unjoinable_pairs.keys() or (table2, table1) in cache_unjoinable_pairs.keys(): continue t1 = self.aurum_api.make_drs(table1) t2 = self.aurum_api.make_drs(table2) t1.set_table_mode() t2.set_table_mode() paths = self.are_paths_in_cache(table1, table2) if paths is None: print('Finding paths between ' + str(table1) + ' and ' + str(table2)) print('max hops: ' + str(max_hops)) s = time.time() drs = self.aurum_api.paths(t1, t2, Relation.PKFK, max_hops=max_hops, lean_search=True) e = time.time() print('Total time: ' + str(e - s)) paths = drs.paths() self.place_paths_in_cache(table1, table2, paths) if len(paths) == 0: cache_unjoinable_pairs[table1, table2] += 1 cache_unjoinable_pairs[table2, table1] += 1 for p in paths: tables_covered = set() tables_in_group = set(candidate_group) for hop in p: if hop.source_name in tables_in_group: tables_covered.add(hop.source_name) paths_per_pair[table1, table2].append((p, tables_covered)) if 
len(paths_per_pair) == 0: join_graphs = [] all_combinations = [el for el in itertools.product(*list(paths_per_pair.values()))] deduplicated_paths = dict() for path_combination in all_combinations: for (p1, p2) in itertools.combinations(path_combination, 2): (path1, tables_covered1) = p1 (path2, tables_covered2) = p2 if len(tables_covered1) > len(tables_covered2): current_cover_len = len(tables_covered1) else: current_cover_len = len(tables_covered2) potential_cover = tables_covered1.union(tables_covered2) joinable_paths = tables_covered1.intersection(tables_covered2) potential_cover_len = len(potential_cover) if potential_cover_len > current_cover_len and len(joinable_paths) > 0: tx_path1 = self.transform_join_path_to_pair_hop(path1) tx_path2 = self.transform_join_path_to_pair_hop(path2) combined_path = tx_path1 + tx_path2 path_id = self.compute_join_graph_id(combined_path) if path_id not in deduplicated_paths: deduplicated_paths[path_id] = (combined_path, potential_cover) for (p, tables_covered) in list(paths_per_pair.values())[0]: if len(tables_covered) == len(candidate_group): tx_p = self.transform_join_path_to_pair_hop(p) path_id = self.compute_join_graph_id(tx_p) deduplicated_paths[path_id] = (tx_p, tables_covered) covering_join_graphs = [jg[0] for (_, jg) in deduplicated_paths.items() if len(jg[1]) == len(candidate_group)] join_graphs = sorted(covering_join_graphs, key=lambda x: len(x)) join_graphs = join_graphs et_joinable = time.time() perf_stats['time_joinable'] += et_joinable - st_joinable if debug_enumerate_all_jps: for (i, group) in enumerate(join_graphs): print('Group: ' + str(i)) for el in group: print(el) continue if len(join_graphs) == 0: if 'unjoinable_candidate_group' not in perf_stats: perf_stats['unjoinable_candidate_group'] = 0 perf_stats['unjoinable_candidate_group'] += 1 print('Group: ' + str(candidate_group) + ' is Non-Joinable with max_hops=' + str(max_hops)) continue if 'joinable_candidate_group' not in perf_stats: perf_stats['joinable_candidate_group'] = 0 perf_stats['joinable_candidate_group'] += 1 if 'num_join_graphs_per_candidate_group' not in perf_stats: perf_stats['num_join_graphs_per_candidate_group'] = [] perf_stats['num_join_graphs_per_candidate_group'].append(len(join_graphs)) total_materializable_join_graphs = 0 materializable_join_graphs = [] for jpg in join_graphs: filters = set() for (l, r) in jpg: if l.source_name in table_fulfilled_filters: filters.update(table_fulfilled_filters[l.source_name]) if r.source_name in table_fulfilled_filters: filters.update(table_fulfilled_filters[r.source_name]) st_is_materializable = time.time() if sum([0] + [1 for el in list_samples if el != '']) > 0: local_intermediates = dict() for (l, r) in jpg: if l.source_name not in local_intermediates: l_path = self.aurum_api.helper.get_path_nid(l.nid) if l.source_name in table_fulfilled_filters: filters_l = table_fulfilled_filters[l.source_name] filtered_l = None for (info, filter_type, filter_id) in filters_l: if filter_type == FilterType.ATTR: filtered_l = dpu.read_relation_on_copy(l_path + l.source_name) continue attribute = info[1] cell_value = info[0] filtered_l = dpu.apply_filter(l_path + l.source_name, attribute, cell_value) if len(filtered_l) == 0: is_join_graph_valid = False else: filtered_l = dpu.read_relation_on_copy(l_path + l.source_name) else: filtered_l = local_intermediates[l.source_name] local_intermediates[l.source_name] = filtered_l if r.source_name not in local_intermediates: r_path = self.aurum_api.helper.get_path_nid(r.nid) if r.source_name in 
table_fulfilled_filters: filters_r = table_fulfilled_filters[r.source_name] filtered_r = None for (info, filter_type, filter_id) in filters_r: if filter_type == FilterType.ATTR: filtered_r = dpu.read_relation_on_copy(r_path + r.source_name) continue attribute = info[1] cell_value = info[0] filtered_r = dpu.apply_filter(r_path + r.source_name, attribute, cell_value) if len(filtered_r) == 0: is_join_graph_valid = False else: filtered_r = dpu.read_relation_on_copy(r_path + r.source_name) else: filtered_r = local_intermediates[r.source_name] local_intermediates[r.source_name] = filtered_r joined = dpu.join_ab_on_key(filtered_l, filtered_r, l.field_name, r.field_name, suffix_str='_x') if len(joined) == 0: is_join_graph_valid = False is_join_graph_valid = True else: is_join_graph_valid = True et_is_materializable = time.time() perf_stats['time_is_materializable'] += et_is_materializable - st_is_materializable if is_join_graph_valid: total_materializable_join_graphs += 1 materializable_join_graphs.append((jpg, filters)) st_materialize = time.time() to_return = [] for (mjg, filters) in materializable_join_graphs: attrs_to_project = dpu.obtain_attributes_to_project(filters) materialized_virtual_schema = dpu.materialize_join_graph_sample(mjg, self, sample_size=1000) if materialized_virtual_schema is False: continue view_metadata = dict() view_metadata['#join_graphs'] = len(materializable_join_graphs) view_metadata['join_graph'] = self.format_join_graph_into_nodes_edges(mjg) to_return.append((materialized_virtual_schema, attrs_to_project, view_metadata)) to_return = to_return et_materialize = time.time() perf_stats['time_materialize'] += et_materialize - st_materialize for el in to_return: if 'actually_materialized' not in perf_stats: perf_stats['actually_materialized'] = 0 perf_stats['actually_materialized'] += 1 yield el if 'materializable_join_graphs' not in perf_stats: perf_stats['materializable_join_graphs'] = [] perf_stats['materializable_join_graphs'].append(total_materializable_join_graphs) perf_stats['num_candidate_groups'] = num_candidate_groups print('Finished enumerating groups') cache_unjoinable_pairs = OrderedDict(sorted(cache_unjoinable_pairs.items(), key=lambda x: x[1], reverse=True)) for (k, v) in cache_unjoinable_pairs.items(): print(str(k) + ' => ' + str(v))
aurum-datadiscovery
positive
def test_upload_file_above_threshold_with_ssec(self): key_bytes = os.urandom(32) extra_args = {'SSECustomerKey': key_bytes, 'SSECustomerAlgorithm': 'AES256'} config = boto3.s3.transfer.TransferConfig(multipart_threshold=5 * 1024 * 1024) <DeepExtract> transfer = boto3.s3.transfer.S3Transfer(self.client, config=config) </DeepExtract> filename = self.files.create_file_with_size('6mb.txt', filesize=6 * 1024 * 1024) transfer.upload_file(filename, self.bucket_name, '6mb.txt', extra_args=extra_args) self.addCleanup(self.delete_object, '6mb.txt') response = self.client.head_object(Bucket=self.bucket_name, Key='6mb.txt', **extra_args) self.assertEqual(response['SSECustomerAlgorithm'], 'AES256')
def test_upload_file_above_threshold_with_ssec(self): key_bytes = os.urandom(32) extra_args = {'SSECustomerKey': key_bytes, 'SSECustomerAlgorithm': 'AES256'} config = boto3.s3.transfer.TransferConfig(multipart_threshold=5 * 1024 * 1024) transfer = boto3.s3.transfer.S3Transfer(self.client, config=config) filename = self.files.create_file_with_size('6mb.txt', filesize=6 * 1024 * 1024) transfer.upload_file(filename, self.bucket_name, '6mb.txt', extra_args=extra_args) self.addCleanup(self.delete_object, '6mb.txt') response = self.client.head_object(Bucket=self.bucket_name, Key='6mb.txt', **extra_args) self.assertEqual(response['SSECustomerAlgorithm'], 'AES256')
boto3
positive
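The boto3 test above exercises S3Transfer with SSE-C extra args. A minimal non-test sketch of the same call pattern is below; the bucket name, object key, and local path are placeholders.

```python
# Sketch of the upload pattern from the test above; bucket/key/path are placeholders.
import os

import boto3
import boto3.s3.transfer

client = boto3.client("s3")
config = boto3.s3.transfer.TransferConfig(multipart_threshold=5 * 1024 * 1024)
transfer = boto3.s3.transfer.S3Transfer(client, config=config)

key_bytes = os.urandom(32)
extra_args = {"SSECustomerKey": key_bytes, "SSECustomerAlgorithm": "AES256"}

# Files above the multipart threshold are uploaded in parts, each encrypted
# with the customer-provided key.
transfer.upload_file("local-6mb.bin", "my-bucket", "6mb.bin", extra_args=extra_args)
head = client.head_object(Bucket="my-bucket", Key="6mb.bin", **extra_args)
print(head["SSECustomerAlgorithm"])
```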
def derivative_plot(derivative_result, combine_plots_in_grid=True, template=PLOTLY_TEMPLATE, palette=PLOTLY_PALETTE): """Plot evaluations and derivative estimates. The resulting grid plot displays function evaluations and derivatives. The derivatives are visualized as a first-order Taylor approximation. Bands are drawn indicating the area in which forward and backward derivatives are located. This is done by filling the area between the derivative estimate with lowest and highest step size, respectively. Do not confuse these bands with statistical errors. This function does not require the params vector as plots are displayed relative to the point at which the derivative is calculated. Args: derivative_result (dict): The result dictionary of call to :func:`~estimagic.differentiation.derivatives.first_derivative` with return_info and return_func_value set to True. combine_plots_in_grid (bool): decide whether to return a one figure containing subplots for each factor pair or a dictionary of individual plots. Default True. template (str): The template for the figure. Default is "plotly_white". palette: The coloring palette for traces. Default is "qualitative.Plotly". Returns: plotly.Figure: The grid plot or dict of individual plots """ func_value = derivative_result['func_value'] func_evals = derivative_result['func_evals'] derivative_candidates = derivative_result['derivative_candidates'] df = func_evals.reset_index() df = df.assign(step=df.step * df.sign) func_evals = df.set_index(['sign', 'step_number', 'dim_x', 'dim_f']) <DeepExtract> given = ['method'] if given_method else [] minimizer = derivative_candidates.groupby([*given, 'dim_x', 'dim_f'])['err'].idxmin() df = derivative_candidates.loc[minimizer]['der'] index_level_to_drop = list({'method', 'num_term'} - set(given)) df = df.droplevel(index_level_to_drop).copy() df_der = df </DeepExtract> <DeepExtract> given = ['method'] if True else [] minimizer = derivative_candidates.groupby([*given, 'dim_x', 'dim_f'])['err'].idxmin() df = derivative_candidates.loc[minimizer]['der'] index_level_to_drop = list({'method', 'num_term'} - set(given)) df = df.droplevel(index_level_to_drop).copy() df_der_method = df </DeepExtract> grid_points = 2 func_value = np.atleast_1d(func_value) max_steps = df.groupby('dim_x')['step'].max() dim_x = range(df['dim_x'].max() + 1) dim_f = range(df['dim_f'].max() + 1) titles = [] x_axis = [] g_list = [] for (row, col) in itertools.product(dim_x, dim_f): g_ind = [] y0 = func_value[col] x_grid = np.linspace(-max_steps[row], max_steps[row], grid_points) y0 = func_value[col] x_grid = np.linspace(-max_steps[row], max_steps[row], grid_points) _scatter_data = func_evals.query('dim_x == @row & dim_f == @col') trace_func_evals = go.Scatter(x=_scatter_data['step'], y=_scatter_data['eval'], mode='markers', name='Function Evaluation', legendgroup=1, marker={'color': 'black'}) g_ind.append(trace_func_evals) for (i, method) in enumerate(['forward', 'central', 'backward']): _y = y0 + x_grid * df_der_method.loc[method, row, col] trace_method = go.Scatter(x=x_grid, y=_y, mode='lines', name=method, legendgroup=2 + i, line={'color': palette[i], 'width': 5}) g_ind.append(trace_method) for (sign, cmap_id) in zip([1, -1], [0, 2]): <DeepExtract> df = func_evals.loc[(sign, slice(None), row, col), ['step', 'eval']] df = df.dropna().sort_index() out = pd.concat([df.head(1), df.tail(1)]).to_numpy() _x_y = out </DeepExtract> diff = _x_y - np.array([0, y0]) slope = diff[:, 1] / diff[:, 0] _y = y0 + x_grid * slope.reshape(-1, 1) trace_fill_lines = 
go.Scatter(x=x_grid, y=_y[0, :], mode='lines', line={'color': palette[cmap_id], 'width': 1}, showlegend=False) g_ind.append(trace_fill_lines) trace_fill_area = go.Scatter(x=x_grid, y=_y[1, :], mode='lines', line={'color': palette[cmap_id], 'width': 1}, fill='tonexty') g_ind.append(trace_fill_area) _y = y0 + x_grid * df_der.loc[row, col] trace_best_estimate = go.Scatter(x=x_grid, y=_y, mode='lines', name='Best Estimate', legendgroup=2, line={'color': 'black', 'width': 2}) g_ind.append(trace_best_estimate) x_axis.append(f'Value relative to x<sub>{(0, row)}</sub>') titles.append(f'dim_x, dim_f = {(row, col)}') g_list.append(g_ind) common_dependencies = {'ind_list': g_list, 'names': titles, 'clean_legend': True, 'scientific_notation': True, 'x_title': x_axis} common_layout = {'template': template, 'margin': {'l': 10, 'r': 10, 't': 30, 'b': 10}} if combine_plots_in_grid: g = create_grid_plot(rows=len(dim_x), cols=len(dim_f), **common_dependencies, kws={'height': 300 * len(dim_x), 'width': 500 * len(dim_f), **common_layout}) out = g else: ind_dict = create_ind_dict(**common_dependencies, kws={'height': 300, 'width': 500, 'title_x': 0.5, **common_layout}) out = ind_dict return out
def derivative_plot(derivative_result, combine_plots_in_grid=True, template=PLOTLY_TEMPLATE, palette=PLOTLY_PALETTE): """Plot evaluations and derivative estimates. The resulting grid plot displays function evaluations and derivatives. The derivatives are visualized as a first-order Taylor approximation. Bands are drawn indicating the area in which forward and backward derivatives are located. This is done by filling the area between the derivative estimate with lowest and highest step size, respectively. Do not confuse these bands with statistical errors. This function does not require the params vector as plots are displayed relative to the point at which the derivative is calculated. Args: derivative_result (dict): The result dictionary of call to :func:`~estimagic.differentiation.derivatives.first_derivative` with return_info and return_func_value set to True. combine_plots_in_grid (bool): decide whether to return a one figure containing subplots for each factor pair or a dictionary of individual plots. Default True. template (str): The template for the figure. Default is "plotly_white". palette: The coloring palette for traces. Default is "qualitative.Plotly". Returns: plotly.Figure: The grid plot or dict of individual plots """ func_value = derivative_result['func_value'] func_evals = derivative_result['func_evals'] derivative_candidates = derivative_result['derivative_candidates'] df = func_evals.reset_index() df = df.assign(step=df.step * df.sign) func_evals = df.set_index(['sign', 'step_number', 'dim_x', 'dim_f']) given = ['method'] if given_method else [] minimizer = derivative_candidates.groupby([*given, 'dim_x', 'dim_f'])['err'].idxmin() df = derivative_candidates.loc[minimizer]['der'] index_level_to_drop = list({'method', 'num_term'} - set(given)) df = df.droplevel(index_level_to_drop).copy() df_der = df given = ['method'] if True else [] minimizer = derivative_candidates.groupby([*given, 'dim_x', 'dim_f'])['err'].idxmin() df = derivative_candidates.loc[minimizer]['der'] index_level_to_drop = list({'method', 'num_term'} - set(given)) df = df.droplevel(index_level_to_drop).copy() df_der_method = df grid_points = 2 func_value = np.atleast_1d(func_value) max_steps = df.groupby('dim_x')['step'].max() dim_x = range(df['dim_x'].max() + 1) dim_f = range(df['dim_f'].max() + 1) titles = [] x_axis = [] g_list = [] for (row, col) in itertools.product(dim_x, dim_f): g_ind = [] y0 = func_value[col] x_grid = np.linspace(-max_steps[row], max_steps[row], grid_points) y0 = func_value[col] x_grid = np.linspace(-max_steps[row], max_steps[row], grid_points) _scatter_data = func_evals.query('dim_x == @row & dim_f == @col') trace_func_evals = go.Scatter(x=_scatter_data['step'], y=_scatter_data['eval'], mode='markers', name='Function Evaluation', legendgroup=1, marker={'color': 'black'}) g_ind.append(trace_func_evals) for (i, method) in enumerate(['forward', 'central', 'backward']): _y = y0 + x_grid * df_der_method.loc[method, row, col] trace_method = go.Scatter(x=x_grid, y=_y, mode='lines', name=method, legendgroup=2 + i, line={'color': palette[i], 'width': 5}) g_ind.append(trace_method) for (sign, cmap_id) in zip([1, -1], [0, 2]): df = func_evals.loc[(sign, slice(None), row, col), ['step', 'eval']] df = df.dropna().sort_index() out = pd.concat([df.head(1), df.tail(1)]).to_numpy() _x_y = out diff = _x_y - np.array([0, y0]) slope = diff[:, 1] / diff[:, 0] _y = y0 + x_grid * slope.reshape(-1, 1) trace_fill_lines = go.Scatter(x=x_grid, y=_y[0, :], mode='lines', line={'color': palette[cmap_id], 'width': 
1}, showlegend=False) g_ind.append(trace_fill_lines) trace_fill_area = go.Scatter(x=x_grid, y=_y[1, :], mode='lines', line={'color': palette[cmap_id], 'width': 1}, fill='tonexty') g_ind.append(trace_fill_area) _y = y0 + x_grid * df_der.loc[row, col] trace_best_estimate = go.Scatter(x=x_grid, y=_y, mode='lines', name='Best Estimate', legendgroup=2, line={'color': 'black', 'width': 2}) g_ind.append(trace_best_estimate) x_axis.append(f'Value relative to x<sub>{(0, row)}</sub>') titles.append(f'dim_x, dim_f = {(row, col)}') g_list.append(g_ind) common_dependencies = {'ind_list': g_list, 'names': titles, 'clean_legend': True, 'scientific_notation': True, 'x_title': x_axis} common_layout = {'template': template, 'margin': {'l': 10, 'r': 10, 't': 30, 'b': 10}} if combine_plots_in_grid: g = create_grid_plot(rows=len(dim_x), cols=len(dim_f), **common_dependencies, kws={'height': 300 * len(dim_x), 'width': 500 * len(dim_f), **common_layout}) out = g else: ind_dict = create_ind_dict(**common_dependencies, kws={'height': 300, 'width': 500, 'title_x': 0.5, **common_layout}) out = ind_dict return out
estimagic
positive
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
        <DeepExtract>
        if isinstance(example, PaddingInputExample):
            feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
        label_map = {}
        for (i, label) in enumerate(label_list):
            label_map[label] = i
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        if tokens_b:
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:max_seq_length - 2]
        tokens = []
        segment_ids = []
        tokens.append('[CLS]')
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append('[SEP]')
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append('[SEP]')
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        if ex_index < 5:
            tf.logging.info('*** Example ***')
            tf.logging.info('guid: %s' % example.guid)
            tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
            tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
            tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
            tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
            tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
        feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
        feature = feature
        </DeepExtract>
        features.append(feature)
    return features
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert a set of `InputExample`s to a list of `InputFeatures`."""
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
        if isinstance(example, PaddingInputExample):
            feature = InputFeatures(input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False)
        label_map = {}
        for (i, label) in enumerate(label_list):
            label_map[label] = i
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        if tokens_b:
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        elif len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[0:max_seq_length - 2]
        tokens = []
        segment_ids = []
        tokens.append('[CLS]')
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append('[SEP]')
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append('[SEP]')
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        if ex_index < 5:
            tf.logging.info('*** Example ***')
            tf.logging.info('guid: %s' % example.guid)
            tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in tokens]))
            tf.logging.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
            tf.logging.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
            tf.logging.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
            tf.logging.info('label: %s (id = %d)' % (example.label, label_id))
        feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
        feature = feature
        features.append(feature)
    return features
BERT-for-Sequence-Labeling-and-Text-Classification
positive
def insert(self, key):
    pCrawl = self.root
    length = len(key)
    for level in range(length):
        <DeepExtract>
        index = ord(key[level]) - ord('a')
        </DeepExtract>
        if not pCrawl.children[index]:
            <DeepExtract>
            pCrawl.children[index] = TrieNode()
            </DeepExtract>
        pCrawl = pCrawl.children[index]
    pCrawl.isEndOfWord = True
def insert(self, key):
    pCrawl = self.root
    length = len(key)
    for level in range(length):
        index = ord(key[level]) - ord('a')
        if not pCrawl.children[index]:
            pCrawl.children[index] = TrieNode()
        pCrawl = pCrawl.children[index]
    pCrawl.isEndOfWord = True
DSA
positive
def main(): Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO Config.backend = Backends.TORCH Config.cuda = True Config.dropout = 0.1 Config.hidden_size = 128 Config.embedding_size = 256 Config.L2 = 3e-05 do_process = False if do_process: <DeepExtract> tokenizer = nltk.tokenize.WordPunctTokenizer() zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0') file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl'] not_t = [] t = ['input', 'support', 'target'] p = Pipeline('snli_example', True) p.add_path(join(zip_path, file_paths[0])) p.add_line_processor(JsonLoaderProcessors()) p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p.add_sent_processor(ToLower()) p.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p.add_token_processor(AddToVocab()) p.add_post_processor(SaveLengthsToState()) p.execute() p.clear_processors() p.state['vocab'].save_to_disk() p.add_sent_processor(ToLower()) p.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p.add_post_processor(ConvertTokenToIdx()) p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128)) state = p.execute() p2 = Pipeline('snli_example') p2.copy_vocab_from_pipeline(p) p2.add_path(join(zip_path, file_paths[1])) p2.add_line_processor(JsonLoaderProcessors()) p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p2.add_sent_processor(ToLower()) p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p2.add_post_processor(SaveLengthsToState()) p2.execute() p2.clear_processors() p2.add_sent_processor(ToLower()) p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p2.add_post_processor(ConvertTokenToIdx()) p2.add_post_processor(StreamToHDF5('snli_dev')) p2.execute() p3 = Pipeline('snli_example') p3.copy_vocab_from_pipeline(p) p3.add_path(join(zip_path, file_paths[2])) p3.add_line_processor(JsonLoaderProcessors()) p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p3.add_sent_processor(ToLower()) p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p3.add_post_processor(SaveLengthsToState()) p3.execute() p3.clear_processors() p3.add_sent_processor(ToLower()) p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p3.add_post_processor(ConvertTokenToIdx()) p3.add_post_processor(StreamToHDF5('snli_test')) p3.execute() </DeepExtract> p = Pipeline('snli_example') vocab = p.state['vocab'] vocab.load_from_disk() batch_size = 128 if Config.backend == Backends.TENSORFLOW: from spodernet.backends.tfbackend import TensorFlowConfig TensorFlowConfig.init_batch_size(batch_size) train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8) dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size) test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size) train_batcher.subscribe_to_events(LossHook('Train', print_every_x_batches=100)) train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=100)) dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=100)) dev_batcher.subscribe_to_events(LossHook('Dev', print_every_x_batches=100)) eta = ETAHook(print_every_x_batches=100) train_batcher.subscribe_to_events(eta) 
train_batcher.subscribe_to_start_of_epoch_event(eta) net = Net(vocab.num_embeddings, vocab.num_labels) if Config.cuda: net.cuda() epochs = 10 opt = torch.optim.Adam(net.parameters(), lr=0.001) net.train() for epoch in range(epochs): for str2var in train_batcher: opt.zero_grad() (loss, argmax) = net(str2var) loss.backward() opt.step() train_batcher.state.loss = loss train_batcher.state.targets = str2var['target'] train_batcher.state.argmax = argmax net.eval() for (i, str2var) in enumerate(dev_batcher): t = str2var['target'] idx = str2var['index'] (loss, argmax) = net(str2var) dev_batcher.state.loss = loss dev_batcher.state.targets = str2var['target'] dev_batcher.state.argmax = argmax
def main(): Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO Config.backend = Backends.TORCH Config.cuda = True Config.dropout = 0.1 Config.hidden_size = 128 Config.embedding_size = 256 Config.L2 = 3e-05 do_process = False if do_process: tokenizer = nltk.tokenize.WordPunctTokenizer() zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0') file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl'] not_t = [] t = ['input', 'support', 'target'] p = Pipeline('snli_example', True) p.add_path(join(zip_path, file_paths[0])) p.add_line_processor(JsonLoaderProcessors()) p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p.add_sent_processor(ToLower()) p.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p.add_token_processor(AddToVocab()) p.add_post_processor(SaveLengthsToState()) p.execute() p.clear_processors() p.state['vocab'].save_to_disk() p.add_sent_processor(ToLower()) p.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p.add_post_processor(ConvertTokenToIdx()) p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128)) state = p.execute() p2 = Pipeline('snli_example') p2.copy_vocab_from_pipeline(p) p2.add_path(join(zip_path, file_paths[1])) p2.add_line_processor(JsonLoaderProcessors()) p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p2.add_sent_processor(ToLower()) p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p2.add_post_processor(SaveLengthsToState()) p2.execute() p2.clear_processors() p2.add_sent_processor(ToLower()) p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p2.add_post_processor(ConvertTokenToIdx()) p2.add_post_processor(StreamToHDF5('snli_dev')) p2.execute() p3 = Pipeline('snli_example') p3.copy_vocab_from_pipeline(p) p3.add_path(join(zip_path, file_paths[2])) p3.add_line_processor(JsonLoaderProcessors()) p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-')) p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label'])) p3.add_sent_processor(ToLower()) p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p3.add_post_processor(SaveLengthsToState()) p3.execute() p3.clear_processors() p3.add_sent_processor(ToLower()) p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t) p3.add_post_processor(ConvertTokenToIdx()) p3.add_post_processor(StreamToHDF5('snli_test')) p3.execute() p = Pipeline('snli_example') vocab = p.state['vocab'] vocab.load_from_disk() batch_size = 128 if Config.backend == Backends.TENSORFLOW: from spodernet.backends.tfbackend import TensorFlowConfig TensorFlowConfig.init_batch_size(batch_size) train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8) dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size) test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size) train_batcher.subscribe_to_events(LossHook('Train', print_every_x_batches=100)) train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=100)) dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=100)) dev_batcher.subscribe_to_events(LossHook('Dev', print_every_x_batches=100)) eta = ETAHook(print_every_x_batches=100) train_batcher.subscribe_to_events(eta) train_batcher.subscribe_to_start_of_epoch_event(eta) net = 
Net(vocab.num_embeddings, vocab.num_labels) if Config.cuda: net.cuda() epochs = 10 opt = torch.optim.Adam(net.parameters(), lr=0.001) net.train() for epoch in range(epochs): for str2var in train_batcher: opt.zero_grad() (loss, argmax) = net(str2var) loss.backward() opt.step() train_batcher.state.loss = loss train_batcher.state.targets = str2var['target'] train_batcher.state.argmax = argmax net.eval() for (i, str2var) in enumerate(dev_batcher): t = str2var['target'] idx = str2var['index'] (loss, argmax) = net(str2var) dev_batcher.state.loss = loss dev_batcher.state.targets = str2var['target'] dev_batcher.state.argmax = argmax
CPL
positive
def test_deserialize_datetime_with_custom_format(self):
    from iso8601 import iso8601
    fmt = '%Y%m%d.%H%M%S'
    <DeepExtract>
    from colander import Invalid
    exc = Invalid(node, msg, val)
    typ = exc
    </DeepExtract>
    <DeepExtract>
    import datetime
    dt = datetime.datetime(2010, 4, 26, 10, 48)
    </DeepExtract>
    tzinfo = iso8601.FixedOffset(1, 0, 'myname')
    dt = dt.replace(tzinfo=tzinfo)
    expected = dt.strftime(fmt)
    node = DummySchemaNode(None)
    result = typ.deserialize(node, expected)
    self.assertEqual(result.strftime(fmt), expected)
def test_deserialize_datetime_with_custom_format(self):
    from iso8601 import iso8601
    fmt = '%Y%m%d.%H%M%S'
    from colander import Invalid
    exc = Invalid(node, msg, val)
    typ = exc
    import datetime
    dt = datetime.datetime(2010, 4, 26, 10, 48)
    tzinfo = iso8601.FixedOffset(1, 0, 'myname')
    dt = dt.replace(tzinfo=tzinfo)
    expected = dt.strftime(fmt)
    node = DummySchemaNode(None)
    result = typ.deserialize(node, expected)
    self.assertEqual(result.strftime(fmt), expected)
colander
positive
def attack(self, features, adj, labels, target_node, n_perturbations, direct=True, n_influencers=3, **kwargs): """Generate perturbations on the input graph. Parameters ---------- features : Original (unperturbed) node feature matrix adj : Original (unperturbed) adjacency matrix labels : node labels target_node : int target_node node index to be attacked n_perturbations : int Number of perturbations on the input graph. Perturbations could be edge removals/additions or feature removals/additions. direct: bool whether to conduct direct attack n_influencers : int number of the top influencers to choose. For direct attack, it will set as `n_perturbations`. """ if sp.issparse(features): features = features.A if not torch.is_tensor(features): features = torch.tensor(features, device=self.device) if torch.is_tensor(adj): adj = utils.to_scipy(adj).csr() self.modified_features = features.requires_grad_(bool(self.attack_features)) target_label = torch.LongTensor([labels[target_node]]) best_wrong_label = torch.LongTensor([(self.logits[target_node].cpu() - 1000 * torch.eye(self.logits.size(1))[target_label]).argmax()]) self.selfloop_degree = torch.tensor(adj.sum(1).A1 + 1, device=self.device) self.target_label = target_label.to(self.device) self.best_wrong_label = best_wrong_label.to(self.device) self.n_perturbations = n_perturbations self.ori_adj = adj self.target_node = target_node self.direct = direct attacker_nodes = torch.where(torch.as_tensor(labels) == best_wrong_label)[0] <DeepExtract> target_node = self.target_node neighbors = self.ori_adj[target_node].indices (sub_nodes, sub_edges) = self.ego_subgraph() if self.direct or n_influencers is not None: influencers = [target_node] attacker_nodes = np.setdiff1d(attacker_nodes, neighbors) else: influencers = neighbors subgraph = self.subgraph_processing(influencers, attacker_nodes, sub_nodes, sub_edges) if n_influencers is not None and self.attack_structure: if self.direct: influencers = [target_node] attacker_nodes = self.get_topk_influencers(subgraph, k=self.n_perturbations + 1) else: influencers = neighbors attacker_nodes = self.get_topk_influencers(subgraph, k=n_influencers) subgraph = self.subgraph_processing(influencers, attacker_nodes, sub_nodes, sub_edges) subgraph = subgraph </DeepExtract> if not direct: mask = torch.logical_or(subgraph.edge_index[0] == target_node, subgraph.edge_index[1] == target_node).to(self.device) structure_perturbations = [] feature_perturbations = [] num_features = features.shape[-1] for _ in range(n_perturbations): <DeepExtract> if self.attack_structure: edge_weight = subgraph.edge_weight non_edge_weight = subgraph.non_edge_weight self_loop_weight = subgraph.self_loop_weight weights = torch.cat([edge_weight, edge_weight, non_edge_weight, non_edge_weight, self_loop_weight], dim=0) else: weights = subgraph.edge_weight weights = self.gcn_norm(subgraph.edges_all, weights, self.selfloop_degree) logit = self.SGCCov(self.compute_XW(), subgraph.edges_all, weights) logit = logit[self.target_node] if self.bias is not None: logit += self.bias logit = F.log_softmax(logit.view(1, -1) / eps, dim=1) loss = F.nll_loss(logit, self.target_label) - F.nll_loss(logit, self.best_wrong_label) edge_grad = non_edge_grad = features_grad = None if self.attack_structure and self.attack_features: (edge_grad, non_edge_grad, features_grad) = torch.autograd.grad(loss, [edge_weight, non_edge_weight, self.modified_features], create_graph=False) elif self.attack_structure: (edge_grad, non_edge_grad) = torch.autograd.grad(loss, [edge_weight, 
non_edge_weight], create_graph=False) else: features_grad = torch.autograd.grad(loss, self.modified_features, create_graph=False)[0] if self.attack_features: self.compute_XW.cache_clear() (edge_grad, non_edge_grad, features_grad) = (edge_grad, non_edge_grad, features_grad) </DeepExtract> max_structure_score = max_feature_score = 0.0 if self.attack_structure: edge_grad *= -2 * subgraph.edge_weight + 1 non_edge_grad *= -2 * subgraph.non_edge_weight + 1 min_grad = min(edge_grad.min().item(), non_edge_grad.min().item()) edge_grad -= min_grad non_edge_grad -= min_grad if not direct: edge_grad[mask] = 0.0 (max_edge_grad, max_edge_idx) = torch.max(edge_grad, dim=0) (max_non_edge_grad, max_non_edge_idx) = torch.max(non_edge_grad, dim=0) max_structure_score = max(max_edge_grad.item(), max_non_edge_grad.item()) if self.attack_features: features_grad *= -2 * self.modified_features + 1 features_grad -= features_grad.min() if not direct: features_grad[target_node] = 0.0 (max_feature_grad, max_feature_idx) = torch.max(features_grad.view(-1), dim=0) max_feature_score = max_feature_grad.item() if max_structure_score >= max_feature_score: if max_edge_grad > max_non_edge_grad: best_edge = subgraph.edge_index[:, max_edge_idx] subgraph.edge_weight.data[max_edge_idx] = 0.0 self.selfloop_degree[best_edge] -= 1.0 else: best_edge = subgraph.non_edge_index[:, max_non_edge_idx] subgraph.non_edge_weight.data[max_non_edge_idx] = 1.0 self.selfloop_degree[best_edge] += 1.0 (u, v) = best_edge.tolist() structure_perturbations.append((u, v)) else: (u, v) = divmod(max_feature_idx.item(), num_features) feature_perturbations.append((u, v)) self.modified_features[u, v].data.fill_(1.0 - self.modified_features[u, v].data) if structure_perturbations: modified_adj = adj.tolil(copy=True) (row, col) = list(zip(*structure_perturbations)) modified_adj[row, col] = modified_adj[col, row] = 1 - modified_adj[row, col].A modified_adj = modified_adj.tocsr(copy=False) modified_adj.eliminate_zeros() else: modified_adj = adj.copy() self.modified_adj = modified_adj self.modified_features = self.modified_features.detach().cpu().numpy() self.structure_perturbations = structure_perturbations self.feature_perturbations = feature_perturbations
def attack(self, features, adj, labels, target_node, n_perturbations, direct=True, n_influencers=3, **kwargs): """Generate perturbations on the input graph. Parameters ---------- features : Original (unperturbed) node feature matrix adj : Original (unperturbed) adjacency matrix labels : node labels target_node : int target_node node index to be attacked n_perturbations : int Number of perturbations on the input graph. Perturbations could be edge removals/additions or feature removals/additions. direct: bool whether to conduct direct attack n_influencers : int number of the top influencers to choose. For direct attack, it will set as `n_perturbations`. """ if sp.issparse(features): features = features.A if not torch.is_tensor(features): features = torch.tensor(features, device=self.device) if torch.is_tensor(adj): adj = utils.to_scipy(adj).csr() self.modified_features = features.requires_grad_(bool(self.attack_features)) target_label = torch.LongTensor([labels[target_node]]) best_wrong_label = torch.LongTensor([(self.logits[target_node].cpu() - 1000 * torch.eye(self.logits.size(1))[target_label]).argmax()]) self.selfloop_degree = torch.tensor(adj.sum(1).A1 + 1, device=self.device) self.target_label = target_label.to(self.device) self.best_wrong_label = best_wrong_label.to(self.device) self.n_perturbations = n_perturbations self.ori_adj = adj self.target_node = target_node self.direct = direct attacker_nodes = torch.where(torch.as_tensor(labels) == best_wrong_label)[0] target_node = self.target_node neighbors = self.ori_adj[target_node].indices (sub_nodes, sub_edges) = self.ego_subgraph() if self.direct or n_influencers is not None: influencers = [target_node] attacker_nodes = np.setdiff1d(attacker_nodes, neighbors) else: influencers = neighbors subgraph = self.subgraph_processing(influencers, attacker_nodes, sub_nodes, sub_edges) if n_influencers is not None and self.attack_structure: if self.direct: influencers = [target_node] attacker_nodes = self.get_topk_influencers(subgraph, k=self.n_perturbations + 1) else: influencers = neighbors attacker_nodes = self.get_topk_influencers(subgraph, k=n_influencers) subgraph = self.subgraph_processing(influencers, attacker_nodes, sub_nodes, sub_edges) subgraph = subgraph if not direct: mask = torch.logical_or(subgraph.edge_index[0] == target_node, subgraph.edge_index[1] == target_node).to(self.device) structure_perturbations = [] feature_perturbations = [] num_features = features.shape[-1] for _ in range(n_perturbations): if self.attack_structure: edge_weight = subgraph.edge_weight non_edge_weight = subgraph.non_edge_weight self_loop_weight = subgraph.self_loop_weight weights = torch.cat([edge_weight, edge_weight, non_edge_weight, non_edge_weight, self_loop_weight], dim=0) else: weights = subgraph.edge_weight weights = self.gcn_norm(subgraph.edges_all, weights, self.selfloop_degree) logit = self.SGCCov(self.compute_XW(), subgraph.edges_all, weights) logit = logit[self.target_node] if self.bias is not None: logit += self.bias logit = F.log_softmax(logit.view(1, -1) / eps, dim=1) loss = F.nll_loss(logit, self.target_label) - F.nll_loss(logit, self.best_wrong_label) edge_grad = non_edge_grad = features_grad = None if self.attack_structure and self.attack_features: (edge_grad, non_edge_grad, features_grad) = torch.autograd.grad(loss, [edge_weight, non_edge_weight, self.modified_features], create_graph=False) elif self.attack_structure: (edge_grad, non_edge_grad) = torch.autograd.grad(loss, [edge_weight, non_edge_weight], create_graph=False) else: 
features_grad = torch.autograd.grad(loss, self.modified_features, create_graph=False)[0] if self.attack_features: self.compute_XW.cache_clear() (edge_grad, non_edge_grad, features_grad) = (edge_grad, non_edge_grad, features_grad) max_structure_score = max_feature_score = 0.0 if self.attack_structure: edge_grad *= -2 * subgraph.edge_weight + 1 non_edge_grad *= -2 * subgraph.non_edge_weight + 1 min_grad = min(edge_grad.min().item(), non_edge_grad.min().item()) edge_grad -= min_grad non_edge_grad -= min_grad if not direct: edge_grad[mask] = 0.0 (max_edge_grad, max_edge_idx) = torch.max(edge_grad, dim=0) (max_non_edge_grad, max_non_edge_idx) = torch.max(non_edge_grad, dim=0) max_structure_score = max(max_edge_grad.item(), max_non_edge_grad.item()) if self.attack_features: features_grad *= -2 * self.modified_features + 1 features_grad -= features_grad.min() if not direct: features_grad[target_node] = 0.0 (max_feature_grad, max_feature_idx) = torch.max(features_grad.view(-1), dim=0) max_feature_score = max_feature_grad.item() if max_structure_score >= max_feature_score: if max_edge_grad > max_non_edge_grad: best_edge = subgraph.edge_index[:, max_edge_idx] subgraph.edge_weight.data[max_edge_idx] = 0.0 self.selfloop_degree[best_edge] -= 1.0 else: best_edge = subgraph.non_edge_index[:, max_non_edge_idx] subgraph.non_edge_weight.data[max_non_edge_idx] = 1.0 self.selfloop_degree[best_edge] += 1.0 (u, v) = best_edge.tolist() structure_perturbations.append((u, v)) else: (u, v) = divmod(max_feature_idx.item(), num_features) feature_perturbations.append((u, v)) self.modified_features[u, v].data.fill_(1.0 - self.modified_features[u, v].data) if structure_perturbations: modified_adj = adj.tolil(copy=True) (row, col) = list(zip(*structure_perturbations)) modified_adj[row, col] = modified_adj[col, row] = 1 - modified_adj[row, col].A modified_adj = modified_adj.tocsr(copy=False) modified_adj.eliminate_zeros() else: modified_adj = adj.copy() self.modified_adj = modified_adj self.modified_features = self.modified_features.detach().cpu().numpy() self.structure_perturbations = structure_perturbations self.feature_perturbations = feature_perturbations
DeepRobust
positive
def connect(self, peer):
    if peer not in self.peers and peer != self.address:
        logger.info(f'(handshake) Sent "connect" to {peer[0]}')
        try:
            <DeepExtract>
            message = prepare_message('connect', None)
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(peer)
                s.sendall(serialize(message))
                if response:
                    return deserialize(s.recv(5000))
            </DeepExtract>
            self.pending_peers.append(peer)
        except:
            logger.info(f'(handshake) Node "{peer[0]} offline"')
def connect(self, peer):
    if peer not in self.peers and peer != self.address:
        logger.info(f'(handshake) Sent "connect" to {peer[0]}')
        try:
            message = prepare_message('connect', None)
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect(peer)
                s.sendall(serialize(message))
                if response:
                    return deserialize(s.recv(5000))
            self.pending_peers.append(peer)
        except:
            logger.info(f'(handshake) Node "{peer[0]} offline"')
digital-cash
positive
def convert_adni_av45_fbb_pet(source_dir, csv_dir, dest_dir, conversion_dir, subjs_list=None, mod_to_update=False):
    """Convert AV-45 and Florbetaben PET images of ADNI into BIDS format.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        conversion_dir: path to the TSV files including the paths to original images
        subjs_list: subjects list
        mod_to_update: If True, pre-existing images in the BIDS directory will be erased and extracted again.
    """
    from os import path
    import pandas as pd
    from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids
    from clinica.utils.stream import cprint
    if not subjs_list:
        adni_merge_path = path.join(csv_dir, 'ADNIMERGE.csv')
        adni_merge = pd.read_csv(adni_merge_path, sep=',', low_memory=False)
        subjs_list = list(adni_merge.PTID.unique())
    cprint(f'Calculating paths of AV45 and Florbetaben PET images. Output will be stored in {conversion_dir}.')
    <DeepExtract>
    from os import path
    import pandas as pd
    from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, get_images_pet
    pet_amyloid_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID', 'Series_ID', 'Image_ID', 'Original', 'Tracer']
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)
    pet_amyloid_dfs_list = []
    av45qc = pd.read_csv(path.join(csv_dir, 'AV45QC.csv'), sep=',', low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, 'AMYQC.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)
    for subj in subjs_list:
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]
        if subject_pet_meta.empty:
            continue
        av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
        amy_qc_subj.insert(0, 'EXAMDATE', amy_qc_subj.SCANDATE.to_list())
        amyloid_qc_subj = pd.concat([av45_qc_subj, amy_qc_subj], axis=0, ignore_index=True, sort=False)
        sequences_preprocessing_step = ['AV45 Co-registered, Averaged', 'FBB Co-registered, Averaged']
        subj_dfs_list = get_images_pet(subj, amyloid_qc_subj, subject_pet_meta, pet_amyloid_col, 'Amyloid-PET', sequences_preprocessing_step)
        if subj_dfs_list:
            pet_amyloid_dfs_list += subj_dfs_list
    if pet_amyloid_dfs_list:
        pet_amyloid_df = pd.concat(pet_amyloid_dfs_list, ignore_index=True)
    conversion_errors = [('128_S_2220', 'm48'), ('098_S_4275', 'm84')]
    if not pet_amyloid_df.empty:
        error_ind = pet_amyloid_df.index[pet_amyloid_df.apply(lambda x: (x.Subject_ID, x.VISCODE) in conversion_errors, axis=1)]
        pet_amyloid_df.drop(error_ind, inplace=True)
    images = find_image_path(pet_amyloid_df, source_dir, 'Amyloid', 'I', 'Image_ID')
    images.to_csv(path.join(conversion_dir, 'amyloid_pet_paths.tsv'), sep='\t', index=False)
    images = images
    </DeepExtract>
    cprint('Paths of AV45 and Florbetaben PET images found. Exporting images into BIDS ...')
    paths_to_bids(images, dest_dir, 'av45_fbb', mod_to_update=mod_to_update)
    cprint(msg='AV45 and Florbetaben PET conversion done.', lvl='debug')
def convert_adni_av45_fbb_pet(source_dir, csv_dir, dest_dir, conversion_dir, subjs_list=None, mod_to_update=False):
    """Convert AV-45 and Florbetaben PET images of ADNI into BIDS format.

    Args:
        source_dir: path to the ADNI directory
        csv_dir: path to the clinical data directory
        dest_dir: path to the destination BIDS directory
        conversion_dir: path to the TSV files including the paths to original images
        subjs_list: subjects list
        mod_to_update: If True, pre-existing images in the BIDS directory will be erased and extracted again.
    """
    from os import path
    import pandas as pd
    from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids
    from clinica.utils.stream import cprint
    if not subjs_list:
        adni_merge_path = path.join(csv_dir, 'ADNIMERGE.csv')
        adni_merge = pd.read_csv(adni_merge_path, sep=',', low_memory=False)
        subjs_list = list(adni_merge.PTID.unique())
    cprint(f'Calculating paths of AV45 and Florbetaben PET images. Output will be stored in {conversion_dir}.')
    from os import path
    import pandas as pd
    from clinica.iotools.converters.adni_to_bids.adni_utils import find_image_path, get_images_pet
    pet_amyloid_col = ['Phase', 'Subject_ID', 'VISCODE', 'Visit', 'Sequence', 'Scan_Date', 'Study_ID', 'Series_ID', 'Image_ID', 'Original', 'Tracer']
    pet_amyloid_df = pd.DataFrame(columns=pet_amyloid_col)
    pet_amyloid_dfs_list = []
    av45qc = pd.read_csv(path.join(csv_dir, 'AV45QC.csv'), sep=',', low_memory=False)
    amyqc = pd.read_csv(path.join(csv_dir, 'AMYQC.csv'), sep=',', low_memory=False)
    pet_meta_list = pd.read_csv(path.join(csv_dir, 'PET_META_LIST.csv'), sep=',', low_memory=False)
    for subj in subjs_list:
        subject_pet_meta = pet_meta_list[pet_meta_list['Subject'] == subj]
        if subject_pet_meta.empty:
            continue
        av45_qc_subj = av45qc[(av45qc.PASS == 1) & (av45qc.RID == int(subj[-4:]))]
        amy_qc_subj = amyqc[(amyqc.SCANQLTY == 1) & (amyqc.RID == int(subj[-4:]))]
        amy_qc_subj.insert(0, 'EXAMDATE', amy_qc_subj.SCANDATE.to_list())
        amyloid_qc_subj = pd.concat([av45_qc_subj, amy_qc_subj], axis=0, ignore_index=True, sort=False)
        sequences_preprocessing_step = ['AV45 Co-registered, Averaged', 'FBB Co-registered, Averaged']
        subj_dfs_list = get_images_pet(subj, amyloid_qc_subj, subject_pet_meta, pet_amyloid_col, 'Amyloid-PET', sequences_preprocessing_step)
        if subj_dfs_list:
            pet_amyloid_dfs_list += subj_dfs_list
    if pet_amyloid_dfs_list:
        pet_amyloid_df = pd.concat(pet_amyloid_dfs_list, ignore_index=True)
    conversion_errors = [('128_S_2220', 'm48'), ('098_S_4275', 'm84')]
    if not pet_amyloid_df.empty:
        error_ind = pet_amyloid_df.index[pet_amyloid_df.apply(lambda x: (x.Subject_ID, x.VISCODE) in conversion_errors, axis=1)]
        pet_amyloid_df.drop(error_ind, inplace=True)
    images = find_image_path(pet_amyloid_df, source_dir, 'Amyloid', 'I', 'Image_ID')
    images.to_csv(path.join(conversion_dir, 'amyloid_pet_paths.tsv'), sep='\t', index=False)
    images = images
    cprint('Paths of AV45 and Florbetaben PET images found. Exporting images into BIDS ...')
    paths_to_bids(images, dest_dir, 'av45_fbb', mod_to_update=mod_to_update)
    cprint(msg='AV45 and Florbetaben PET conversion done.', lvl='debug')
clinica
positive
def cycle_html(im_fnames_table, group_name, start):
    <DeepExtract>
    im_list_js = '[' + ', '.join([js_list(list(map(ut.quote, row))) for row in im_fnames_table]) + ']'
    </DeepExtract>
    id = str(random.random())
    group_name = str(random.random()) if group_name is None else group_name
    default = im_fnames_table[start[0]][start[1]] if len(im_fnames_table) and len(im_fnames_table[0]) else ''
    return ut.frm('<img src = "%(default)s" id = "%(id)s" onmouseover = \'curr_cycle_group = "%(group_name)s";\' onclick = \'cycle("%(group_name)s", 1, 1, event.shiftKey, event.ctrlKey)\', oncontextmenu = \'cycle("%(group_name)s", 1, -1, event.shiftKey, event.ctrlKey); return false;\' onload = \'register_cycle("%(group_name)s", "%(id)s", %(im_list_js)s, %(repr(list(start)))s);\'>')
def cycle_html(im_fnames_table, group_name, start):
    im_list_js = '[' + ', '.join([js_list(list(map(ut.quote, row))) for row in im_fnames_table]) + ']'
    id = str(random.random())
    group_name = str(random.random()) if group_name is None else group_name
    default = im_fnames_table[start[0]][start[1]] if len(im_fnames_table) and len(im_fnames_table[0]) else ''
    return ut.frm('<img src = "%(default)s" id = "%(id)s" onmouseover = \'curr_cycle_group = "%(group_name)s";\' onclick = \'cycle("%(group_name)s", 1, 1, event.shiftKey, event.ctrlKey)\', oncontextmenu = \'cycle("%(group_name)s", 1, -1, event.shiftKey, event.ctrlKey); return false;\' onload = \'register_cycle("%(group_name)s", "%(id)s", %(im_list_js)s, %(repr(list(start)))s);\'>')
avobjects
positive
def _set_dependencies_and_providers(self, manual_requirements=empty_dict, skip_unused_theories=False): components: List[Theory] = self.components direct_param_dependence: Dict[Theory, Set[str]] = {c: set() for c in components} def _tidy_requirements(_require, _component=None): if not _require: return [] <DeepExtract> if isinstance(_require, Mapping): _require = [Requirement(name, options) for (name, options) in _require.items()] elif isinstance(_require, str): _require = [Requirement(_require, None)] elif isinstance(_require, Iterable): if all((isinstance(term, str) for term in _require)): _require = [Requirement(name, None) for name in _require] result = [] for item in _require: if isinstance(item, Sequence) and len(item) == 2: result.append(Requirement(item[0], item[1])) else: break else: _require = result raise ValueError('Requirements must be a dict of names and options, a list of names, or an iterable of requirement (name, option) pairs') </DeepExtract> requirements_in_input_params = set((req.name for req in _require)).intersection(self.input_params) if requirements_in_input_params and _component is not None: direct_param_dependence[_component].update(requirements_in_input_params) return [_req for _req in _require if _req.name not in requirements_in_input_params] else: return _require requirements: Dict[Theory, List[Requirement]] = {} providers: Dict[str, List[Theory]] = {} requirements_are_params: Set[str] = set() for component in components: component.initialize_with_params() <DeepExtract> if not component.get_requirements(): requirements[component] = [] component.get_requirements() = as_requirement_list(component.get_requirements()) requirements_in_input_params = set((req.name for req in component.get_requirements())).intersection(self.input_params) if requirements_in_input_params and component is not None: direct_param_dependence[component].update(requirements_in_input_params) requirements[component] = [_req for _req in component.get_requirements() if _req.name not in requirements_in_input_params] else: requirements[component] = component.get_requirements() </DeepExtract> requirements[component] += [Requirement(p, None) for p in getattr(component, 'params', {}) or [] if p not in self.input_params and p not in component.output_params] can_provide = list(component.get_can_provide()) + list(component.get_can_provide_methods()) provide_params = set((p for p in component.get_can_provide_params() if all((p != req.name for req in requirements[component])))) provide_params.update(component.output_params) requirements_are_params.update(provide_params) for k in chain(can_provide, provide_params): providers[k] = providers.get(k, []) if component not in providers[k]: providers[k].append(component) manual_theory = Theory(name='_manual') if manual_requirements: self._manual_requirements = getattr(self, '_manual_requirements', []) + _tidy_requirements(manual_requirements) requirements[manual_theory] = deepcopy(self._manual_requirements) self._must_provide: Dict[Theory, List[Requirement]] = {c: [] for c in components} requirement_providers = {} dependencies: Dict[Theory, Set[Theory]] = {} used_suppliers = set((c for c in components if c.output_params)) there_are_more_requirements = True must_provide = {c: [Requirement(p, None) for p in c.output_params] for c in components} while there_are_more_requirements: for (component, requires) in requirements.items(): for requirement in requires: suppliers = providers.get(requirement.name) if not suppliers: requirements[manual_theory] = [req for req in 
requirements.get(manual_theory, []) if req.name != requirement.name] raise LoggedError(self.log, 'Requirement %s of %r is not provided by any component, nor sampled directly', requirement.name, component) supplier: Optional[Theory] if len(suppliers) == 1: supplier = suppliers[0] else: supplier = None for sup in suppliers: provide = str_to_list(getattr(sup, 'provides', [])) if requirement.name in provide: if supplier: raise LoggedError(self.log, 'more than one component provides %s', requirement.name) supplier = sup if not supplier: raise LoggedError(self.log, "requirement %s is provided by more than one component: %s. Use 'provides' keyword to specify which provides it", requirement.name, suppliers) if supplier is component: raise LoggedError(self.log, 'Component %r cannot provide %s to itself!', component, requirement.name) requirement_providers[requirement.name] = supplier.get_provider() used_suppliers.add(supplier) declared_requirements_for_this_supplier = self._must_provide[supplier] + must_provide[supplier] if requirement not in declared_requirements_for_this_supplier: must_provide[supplier] += [requirement] dependencies[component] = dependencies.get(component, set()) | {supplier} if component is not manual_theory and requirement.name not in component.input_params and (requirement.name in requirements_are_params): component.input_params_extra.add(requirement.name) there_are_more_requirements = False for (component, requires) in requirements.items(): requires[:] = [] for request in must_provide.get(component) or []: <DeepExtract> if not component.must_provide(**{request.name: request.options}): conditional_requirements = [] component.must_provide(**{request.name: request.options}) = as_requirement_list(component.must_provide(**{request.name: request.options})) requirements_in_input_params = set((req.name for req in component.must_provide(**{request.name: request.options}))).intersection(self.input_params) if requirements_in_input_params and component is not None: direct_param_dependence[component].update(requirements_in_input_params) conditional_requirements = [_req for _req in component.must_provide(**{request.name: request.options}) if _req.name not in requirements_in_input_params] else: conditional_requirements = component.must_provide(**{request.name: request.options}) </DeepExtract> self._must_provide[component].append(request) if conditional_requirements: there_are_more_requirements = True requires += conditional_requirements <DeepExtract> dependence_order: List[Theory] = [] deps = {p: s.copy() for (p, s) in dependencies.items()} comps = [c for c in components if not isinstance(c, AbsorbUnusedParamsLikelihood)] target_length = len(comps) _last = 0 while len(dependence_order) < target_length: for component in list(comps): if not deps.get(component): dependence_order.append(component) comps.remove(component) for (p, dep) in deps.items(): dep.discard(component) if len(dependence_order) == _last: raise LoggedError(self.log, 'Circular dependency, cannot calculate %r' % comps) _last = len(dependence_order) likes = list(self.likelihood.values()) self._component_order = {c: likes.index(c) if c in likes else None for c in dependence_order} </DeepExtract> must_provide = {c: [] for c in components} requirements.pop(manual_theory, None) if self._unassigned_input: self._unassigned_input.difference_update(*direct_param_dependence.values()) if self._unassigned_input: unassigned = self._unassigned_input - self.prior.external_dependence if unassigned: raise LoggedError(self.log, 'Could not find 
anything to use input parameter(s) %r.', unassigned) else: self.mpi_warning('Parameter(s) %s are only used by the prior', self._unassigned_input) unused_theories = set(self.theory.values()) - used_suppliers if unused_theories: if skip_unused_theories: self.mpi_debug('Theories %s do not need to be computed and will be skipped', unused_theories) for theory in unused_theories: self._component_order.pop(theory, None) components.remove(theory) else: self.mpi_warning('Theories %s do not appear to be actually used for anything', unused_theories) self.mpi_debug('Components will be computed in the order:') self.mpi_debug(' - %r' % list(self._component_order)) def dependencies_of(_component): deps = set() for c in dependencies.get(_component, []): deps.add(c) deps.update(dependencies_of(c)) return deps self._dependencies = {c: dependencies_of(c) for c in components} self._params_of_dependencies: List[Set[str]] = [set() for _ in self._component_order] for (component, param_dep) in zip(self._component_order, self._params_of_dependencies): param_dep.update(direct_param_dependence.get(component) or []) for dep in self._dependencies.get(component, []): param_dep.update(set(dep.input_params).union(direct_param_dependence.get(dep) or [])) param_dep -= set(component.input_params) if not len(component.input_params) and (not param_dep) and (component.get_name() != 'one'): raise LoggedError(self.log, "Component '%r' seems not to depend on any parameters (neither directly nor indirectly)", component) sampled_input_dependence = self.parameterization.sampled_input_dependence() sampled_dependence: Dict[str, List[Theory]] = {p: [] for p in sampled_input_dependence} for (p, i_s) in sampled_input_dependence.items(): for component in components: if p in component.input_params or (i_s and any((p_i in component.input_params for p_i in i_s))): sampled_dependence[p].append(component) for comp in components: if comp is not component and component in self._dependencies.get(comp, []): sampled_dependence[p].append(comp) self.sampled_dependence = sampled_dependence self.requires_derived: Set[str] = requirements_are_params.intersection(requirement_providers) if self.is_debug_and_mpi_root(): if requirement_providers: self.log.debug('Requirements will be calculated by these components:') for (req, provider) in requirement_providers.items(): self.log.debug('- %s: %s', req, provider) else: self.log.debug('No requirements need to be computed') self.provider = Provider(self, requirement_providers) for component in components: component.initialize_with_provider(self.provider)
def _set_dependencies_and_providers(self, manual_requirements=empty_dict, skip_unused_theories=False): components: List[Theory] = self.components direct_param_dependence: Dict[Theory, Set[str]] = {c: set() for c in components} def _tidy_requirements(_require, _component=None): if not _require: return [] if isinstance(_require, Mapping): _require = [Requirement(name, options) for (name, options) in _require.items()] elif isinstance(_require, str): _require = [Requirement(_require, None)] elif isinstance(_require, Iterable): if all((isinstance(term, str) for term in _require)): _require = [Requirement(name, None) for name in _require] result = [] for item in _require: if isinstance(item, Sequence) and len(item) == 2: result.append(Requirement(item[0], item[1])) else: break else: _require = result raise ValueError('Requirements must be a dict of names and options, a list of names, or an iterable of requirement (name, option) pairs') requirements_in_input_params = set((req.name for req in _require)).intersection(self.input_params) if requirements_in_input_params and _component is not None: direct_param_dependence[_component].update(requirements_in_input_params) return [_req for _req in _require if _req.name not in requirements_in_input_params] else: return _require requirements: Dict[Theory, List[Requirement]] = {} providers: Dict[str, List[Theory]] = {} requirements_are_params: Set[str] = set() for component in components: component.initialize_with_params() if not component.get_requirements(): requirements[component] = [] component.get_requirements() = as_requirement_list(component.get_requirements()) requirements_in_input_params = set((req.name for req in component.get_requirements())).intersection(self.input_params) if requirements_in_input_params and component is not None: direct_param_dependence[component].update(requirements_in_input_params) requirements[component] = [_req for _req in component.get_requirements() if _req.name not in requirements_in_input_params] else: requirements[component] = component.get_requirements() requirements[component] += [Requirement(p, None) for p in getattr(component, 'params', {}) or [] if p not in self.input_params and p not in component.output_params] can_provide = list(component.get_can_provide()) + list(component.get_can_provide_methods()) provide_params = set((p for p in component.get_can_provide_params() if all((p != req.name for req in requirements[component])))) provide_params.update(component.output_params) requirements_are_params.update(provide_params) for k in chain(can_provide, provide_params): providers[k] = providers.get(k, []) if component not in providers[k]: providers[k].append(component) manual_theory = Theory(name='_manual') if manual_requirements: self._manual_requirements = getattr(self, '_manual_requirements', []) + _tidy_requirements(manual_requirements) requirements[manual_theory] = deepcopy(self._manual_requirements) self._must_provide: Dict[Theory, List[Requirement]] = {c: [] for c in components} requirement_providers = {} dependencies: Dict[Theory, Set[Theory]] = {} used_suppliers = set((c for c in components if c.output_params)) there_are_more_requirements = True must_provide = {c: [Requirement(p, None) for p in c.output_params] for c in components} while there_are_more_requirements: for (component, requires) in requirements.items(): for requirement in requires: suppliers = providers.get(requirement.name) if not suppliers: requirements[manual_theory] = [req for req in requirements.get(manual_theory, []) if req.name != 
requirement.name] raise LoggedError(self.log, 'Requirement %s of %r is not provided by any component, nor sampled directly', requirement.name, component) supplier: Optional[Theory] if len(suppliers) == 1: supplier = suppliers[0] else: supplier = None for sup in suppliers: provide = str_to_list(getattr(sup, 'provides', [])) if requirement.name in provide: if supplier: raise LoggedError(self.log, 'more than one component provides %s', requirement.name) supplier = sup if not supplier: raise LoggedError(self.log, "requirement %s is provided by more than one component: %s. Use 'provides' keyword to specify which provides it", requirement.name, suppliers) if supplier is component: raise LoggedError(self.log, 'Component %r cannot provide %s to itself!', component, requirement.name) requirement_providers[requirement.name] = supplier.get_provider() used_suppliers.add(supplier) declared_requirements_for_this_supplier = self._must_provide[supplier] + must_provide[supplier] if requirement not in declared_requirements_for_this_supplier: must_provide[supplier] += [requirement] dependencies[component] = dependencies.get(component, set()) | {supplier} if component is not manual_theory and requirement.name not in component.input_params and (requirement.name in requirements_are_params): component.input_params_extra.add(requirement.name) there_are_more_requirements = False for (component, requires) in requirements.items(): requires[:] = [] for request in must_provide.get(component) or []: if not component.must_provide(**{request.name: request.options}): conditional_requirements = [] component.must_provide(**{request.name: request.options}) = as_requirement_list(component.must_provide(**{request.name: request.options})) requirements_in_input_params = set((req.name for req in component.must_provide(**{request.name: request.options}))).intersection(self.input_params) if requirements_in_input_params and component is not None: direct_param_dependence[component].update(requirements_in_input_params) conditional_requirements = [_req for _req in component.must_provide(**{request.name: request.options}) if _req.name not in requirements_in_input_params] else: conditional_requirements = component.must_provide(**{request.name: request.options}) self._must_provide[component].append(request) if conditional_requirements: there_are_more_requirements = True requires += conditional_requirements dependence_order: List[Theory] = [] deps = {p: s.copy() for (p, s) in dependencies.items()} comps = [c for c in components if not isinstance(c, AbsorbUnusedParamsLikelihood)] target_length = len(comps) _last = 0 while len(dependence_order) < target_length: for component in list(comps): if not deps.get(component): dependence_order.append(component) comps.remove(component) for (p, dep) in deps.items(): dep.discard(component) if len(dependence_order) == _last: raise LoggedError(self.log, 'Circular dependency, cannot calculate %r' % comps) _last = len(dependence_order) likes = list(self.likelihood.values()) self._component_order = {c: likes.index(c) if c in likes else None for c in dependence_order} must_provide = {c: [] for c in components} requirements.pop(manual_theory, None) if self._unassigned_input: self._unassigned_input.difference_update(*direct_param_dependence.values()) if self._unassigned_input: unassigned = self._unassigned_input - self.prior.external_dependence if unassigned: raise LoggedError(self.log, 'Could not find anything to use input parameter(s) %r.', unassigned) else: self.mpi_warning('Parameter(s) %s are only used by 
the prior', self._unassigned_input) unused_theories = set(self.theory.values()) - used_suppliers if unused_theories: if skip_unused_theories: self.mpi_debug('Theories %s do not need to be computed and will be skipped', unused_theories) for theory in unused_theories: self._component_order.pop(theory, None) components.remove(theory) else: self.mpi_warning('Theories %s do not appear to be actually used for anything', unused_theories) self.mpi_debug('Components will be computed in the order:') self.mpi_debug(' - %r' % list(self._component_order)) def dependencies_of(_component): deps = set() for c in dependencies.get(_component, []): deps.add(c) deps.update(dependencies_of(c)) return deps self._dependencies = {c: dependencies_of(c) for c in components} self._params_of_dependencies: List[Set[str]] = [set() for _ in self._component_order] for (component, param_dep) in zip(self._component_order, self._params_of_dependencies): param_dep.update(direct_param_dependence.get(component) or []) for dep in self._dependencies.get(component, []): param_dep.update(set(dep.input_params).union(direct_param_dependence.get(dep) or [])) param_dep -= set(component.input_params) if not len(component.input_params) and (not param_dep) and (component.get_name() != 'one'): raise LoggedError(self.log, "Component '%r' seems not to depend on any parameters (neither directly nor indirectly)", component) sampled_input_dependence = self.parameterization.sampled_input_dependence() sampled_dependence: Dict[str, List[Theory]] = {p: [] for p in sampled_input_dependence} for (p, i_s) in sampled_input_dependence.items(): for component in components: if p in component.input_params or (i_s and any((p_i in component.input_params for p_i in i_s))): sampled_dependence[p].append(component) for comp in components: if comp is not component and component in self._dependencies.get(comp, []): sampled_dependence[p].append(comp) self.sampled_dependence = sampled_dependence self.requires_derived: Set[str] = requirements_are_params.intersection(requirement_providers) if self.is_debug_and_mpi_root(): if requirement_providers: self.log.debug('Requirements will be calculated by these components:') for (req, provider) in requirement_providers.items(): self.log.debug('- %s: %s', req, provider) else: self.log.debug('No requirements need to be computed') self.provider = Provider(self, requirement_providers) for component in components: component.initialize_with_provider(self.provider)
cobaya
positive
def viz_ego_agent_views(self, viz_scale=20, view_shape=(15, 15), do_wait=True, wait_key=0, array_only=False): <DeepExtract> viz_image = np.ones((self._nrows * viz_scale, self._ncols * viz_scale, 3), dtype=np.uint8) * 255 for p in np.argwhere(self.agent_reachable_positions_mask): tl = (p[1] * viz_scale - viz_scale // 4, p[0] * viz_scale - viz_scale // 4) br = (p[1] * viz_scale + viz_scale // 4, p[0] * viz_scale + viz_scale // 4) cv2.rectangle(viz_image, tl, br, (210, 210, 210), -1) masks = [self.rotation_to_lifted_object_reachable_position_masks[rot] for rot in sorted(list(self.rotation_to_lifted_object_reachable_position_masks.keys()))] for p in np.argwhere(np.stack(masks, axis=0).any(0) != 0): color = np.array([0, 0, 0]) for (i, mask) in enumerate(masks): if mask[p[0], p[1]] and i < 3: color[i] = 255 elif mask[p[0], p[1]]: color = color // 2 offset = viz_scale // 10 + viz_scale // 4 tl = (p[1] * viz_scale - offset, p[0] * viz_scale - offset) br = (p[1] * viz_scale + offset, p[0] * viz_scale + offset) cv2.rectangle(viz_image, tl, br, tuple((int(i) for i in color)), thickness=viz_scale // 10) for (object_id, tracked_object) in self.tracked_objects.items(): if object_id == self.lifted_object_id: continue elif hasattr(tracked_object, 'object_mask'): object_mask = tracked_object.object_mask row = tracked_object.row col = tracked_object.col (row_rad, col_rad) = (object_mask.shape[0] // 2, object_mask.shape[1] // 2) obj_mask = self.empty_mask() obj_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1] = object_mask boundary_points = self.get_boundary_of_mask(obj_mask) self.draw_path_from_row_cols(points=boundary_points, viz_image=viz_image, viz_scale=viz_scale, color=(255, 165, 0), thickness=max(viz_scale // 10, 1), expand_from_center=np.array([row, col])) if self.lifted_object is not None: self.draw_path_from_row_cols(points=self.get_boundary_of_mask(self.current_lifted_object_mask()), viz_image=viz_image, viz_scale=viz_scale, color=(255, 0, 255), thickness=2, expand_from_center=np.array([self.lifted_object.row, self.lifted_object.col])) self.draw_path_from_row_cols(points=self.get_boundary_of_mask(self.current_near_lifted_object_mask()), viz_image=viz_image, viz_scale=viz_scale, color=(180, 0, 180, 180), thickness=max(viz_scale // 10, 1), expand_from_center=np.array([self.lifted_object.row, self.lifted_object.col])) agent_colors = [(255, 0, 0), (0, 0, 255), (31, 119, 180), (255, 127, 14), (44, 160, 44), (214, 39, 40), (148, 103, 189), (140, 86, 75), (227, 119, 194), (127, 127, 127), (188, 189, 34), (23, 190, 207)] for (i, a) in enumerate(self.agents): forward_dir = MOVE_MAP[a.rot] right_dir = MOVE_MAP[(a.rot + 90) % 360] cv2.drawContours(viz_image, [np.array([(a.col * viz_scale + forward_dir['col'] * (viz_scale // 3), a.row * viz_scale + forward_dir['row'] * (viz_scale // 3)), (a.col * viz_scale - (right_dir['col'] * viz_scale // 4 + forward_dir['col'] * viz_scale // 3), a.row * viz_scale - (right_dir['row'] * viz_scale // 4 + forward_dir['row'] * viz_scale // 3)), (a.col * viz_scale - (-right_dir['col'] * viz_scale // 4 + forward_dir['col'] * viz_scale // 3), a.row * viz_scale - (-right_dir['row'] * viz_scale // 4 + forward_dir['row'] * viz_scale // 3))])], 0, agent_colors[i], -1) if not True: cv2.imshow('aoeu', viz_image) if do_wait: world = str(chr(cv2.waitKey(wait_key) & 255)) else: cv2.waitKey(100) world = viz_image </DeepExtract> assert view_shape[0] == view_shape[1] pad = viz_scale * view_shape[0] world = np.pad(world, ((pad, pad), (pad, pad), (0, 0)), mode='constant', 
constant_values=255) def to_pix(a): return int(round(a * viz_scale + viz_scale * view_shape[0])) (forward_width, side_width) = view_shape ego_views = [] for agent in self.agents: (row, col, rot) = (agent.row, agent.col, int(agent.rot)) if rot == 0: row -= 1 / 2 row_pix_slice = slice(to_pix(row - forward_width), to_pix(row) + 1) col_pix_slice = slice(to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1) elif rot == 180: row += 1 / 2 row_pix_slice = slice(to_pix(row), to_pix(row + forward_width) + 1) col_pix_slice = slice(to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1) elif rot == 90: col += 1 / 2 row_pix_slice = slice(to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1) col_pix_slice = slice(to_pix(col), to_pix(col + forward_width) + 1) elif rot == 270: col -= 1 / 2 row_pix_slice = slice(to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1) col_pix_slice = slice(to_pix(col - forward_width), to_pix(col) + 1) else: raise NotImplementedError ego_views.append(np.rot90(world[row_pix_slice, col_pix_slice], k=rot // 90)) if not array_only: cv2.imshow('aoeu', np.concatenate(ego_views, axis=0)) if do_wait: return str(chr(cv2.waitKey(wait_key) & 255)) else: cv2.waitKey(100) return ego_views
def viz_ego_agent_views(self, viz_scale=20, view_shape=(15, 15), do_wait=True, wait_key=0, array_only=False): viz_image = np.ones((self._nrows * viz_scale, self._ncols * viz_scale, 3), dtype=np.uint8) * 255 for p in np.argwhere(self.agent_reachable_positions_mask): tl = (p[1] * viz_scale - viz_scale // 4, p[0] * viz_scale - viz_scale // 4) br = (p[1] * viz_scale + viz_scale // 4, p[0] * viz_scale + viz_scale // 4) cv2.rectangle(viz_image, tl, br, (210, 210, 210), -1) masks = [self.rotation_to_lifted_object_reachable_position_masks[rot] for rot in sorted(list(self.rotation_to_lifted_object_reachable_position_masks.keys()))] for p in np.argwhere(np.stack(masks, axis=0).any(0) != 0): color = np.array([0, 0, 0]) for (i, mask) in enumerate(masks): if mask[p[0], p[1]] and i < 3: color[i] = 255 elif mask[p[0], p[1]]: color = color // 2 offset = viz_scale // 10 + viz_scale // 4 tl = (p[1] * viz_scale - offset, p[0] * viz_scale - offset) br = (p[1] * viz_scale + offset, p[0] * viz_scale + offset) cv2.rectangle(viz_image, tl, br, tuple((int(i) for i in color)), thickness=viz_scale // 10) for (object_id, tracked_object) in self.tracked_objects.items(): if object_id == self.lifted_object_id: continue elif hasattr(tracked_object, 'object_mask'): object_mask = tracked_object.object_mask row = tracked_object.row col = tracked_object.col (row_rad, col_rad) = (object_mask.shape[0] // 2, object_mask.shape[1] // 2) obj_mask = self.empty_mask() obj_mask[row - row_rad:row + row_rad + 1, col - col_rad:col + col_rad + 1] = object_mask boundary_points = self.get_boundary_of_mask(obj_mask) self.draw_path_from_row_cols(points=boundary_points, viz_image=viz_image, viz_scale=viz_scale, color=(255, 165, 0), thickness=max(viz_scale // 10, 1), expand_from_center=np.array([row, col])) if self.lifted_object is not None: self.draw_path_from_row_cols(points=self.get_boundary_of_mask(self.current_lifted_object_mask()), viz_image=viz_image, viz_scale=viz_scale, color=(255, 0, 255), thickness=2, expand_from_center=np.array([self.lifted_object.row, self.lifted_object.col])) self.draw_path_from_row_cols(points=self.get_boundary_of_mask(self.current_near_lifted_object_mask()), viz_image=viz_image, viz_scale=viz_scale, color=(180, 0, 180, 180), thickness=max(viz_scale // 10, 1), expand_from_center=np.array([self.lifted_object.row, self.lifted_object.col])) agent_colors = [(255, 0, 0), (0, 0, 255), (31, 119, 180), (255, 127, 14), (44, 160, 44), (214, 39, 40), (148, 103, 189), (140, 86, 75), (227, 119, 194), (127, 127, 127), (188, 189, 34), (23, 190, 207)] for (i, a) in enumerate(self.agents): forward_dir = MOVE_MAP[a.rot] right_dir = MOVE_MAP[(a.rot + 90) % 360] cv2.drawContours(viz_image, [np.array([(a.col * viz_scale + forward_dir['col'] * (viz_scale // 3), a.row * viz_scale + forward_dir['row'] * (viz_scale // 3)), (a.col * viz_scale - (right_dir['col'] * viz_scale // 4 + forward_dir['col'] * viz_scale // 3), a.row * viz_scale - (right_dir['row'] * viz_scale // 4 + forward_dir['row'] * viz_scale // 3)), (a.col * viz_scale - (-right_dir['col'] * viz_scale // 4 + forward_dir['col'] * viz_scale // 3), a.row * viz_scale - (-right_dir['row'] * viz_scale // 4 + forward_dir['row'] * viz_scale // 3))])], 0, agent_colors[i], -1) if not True: cv2.imshow('aoeu', viz_image) if do_wait: world = str(chr(cv2.waitKey(wait_key) & 255)) else: cv2.waitKey(100) world = viz_image assert view_shape[0] == view_shape[1] pad = viz_scale * view_shape[0] world = np.pad(world, ((pad, pad), (pad, pad), (0, 0)), mode='constant', constant_values=255) def 
to_pix(a): return int(round(a * viz_scale + viz_scale * view_shape[0])) (forward_width, side_width) = view_shape ego_views = [] for agent in self.agents: (row, col, rot) = (agent.row, agent.col, int(agent.rot)) if rot == 0: row -= 1 / 2 row_pix_slice = slice(to_pix(row - forward_width), to_pix(row) + 1) col_pix_slice = slice(to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1) elif rot == 180: row += 1 / 2 row_pix_slice = slice(to_pix(row), to_pix(row + forward_width) + 1) col_pix_slice = slice(to_pix(col - side_width / 2), to_pix(col + side_width / 2) + 1) elif rot == 90: col += 1 / 2 row_pix_slice = slice(to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1) col_pix_slice = slice(to_pix(col), to_pix(col + forward_width) + 1) elif rot == 270: col -= 1 / 2 row_pix_slice = slice(to_pix(row - side_width / 2), to_pix(row + side_width / 2) + 1) col_pix_slice = slice(to_pix(col - forward_width), to_pix(col) + 1) else: raise NotImplementedError ego_views.append(np.rot90(world[row_pix_slice, col_pix_slice], k=rot // 90)) if not array_only: cv2.imshow('aoeu', np.concatenate(ego_views, axis=0)) if do_wait: return str(chr(cv2.waitKey(wait_key) & 255)) else: cv2.waitKey(100) return ego_views
cordial-sync
positive
def evaluate(self, release=False): eval_start_time = time.time() if self.eval_queue is None: self.eval_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index, node_to_index=self.node_to_index, target_to_index=self.target_to_index, config=self.config, is_evaluating=True, num_training_samples=self.num_training_examples) reader_output = self.eval_queue.get_output() <DeepExtract> target_index = reader_output[reader.TARGET_INDEX_KEY] path_source_indices = reader_output[reader.PATH_SOURCE_INDICES_KEY] node_indices = reader_output[reader.NODE_INDICES_KEY] path_target_indices = reader_output[reader.PATH_TARGET_INDICES_KEY] valid_mask = reader_output[reader.VALID_CONTEXT_MASK_KEY] path_source_lengths = reader_output[reader.PATH_SOURCE_LENGTHS_KEY] path_lengths = reader_output[reader.PATH_LENGTHS_KEY] path_target_lengths = reader_output[reader.PATH_TARGET_LENGTHS_KEY] language_ids = reader_output[reader.LANGUAGE_ID] with tf.variable_scope('model', reuse=self.get_should_reuse_variables()): subtoken_vocab = tf.get_variable('SUBTOKENS_VOCAB', shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB', shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) nodes_vocab = tf.get_variable('NODES_VOCAB', shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) if self.use_multilanguage: language_embedding = tf.get_variable('LANGUAGE_EMBEDDING', shape=(len(self.languages), self.config.EMBEDDINGS_SIZE), dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_OUT', uniform=True)) batched_contexts = self.compute_contexts(subtoken_vocab=subtoken_vocab, nodes_vocab=nodes_vocab, source_input=path_source_indices, nodes_input=node_indices, target_input=path_target_indices, valid_mask=valid_mask, path_source_lengths=path_source_lengths, path_lengths=path_lengths, path_target_lengths=path_target_lengths, is_evaluating=True, language_embedding=language_embedding, language_ids=language_ids) (outputs, final_states) = self.decode_outputs(target_words_vocab=target_words_vocab, target_input=target_index, batch_size=tf.shape(target_index)[0], batched_contexts=batched_contexts, valid_mask=valid_mask, is_evaluating=True) if self.config.BEAM_WIDTH > 0: predicted_indices = outputs.predicted_ids topk_values = outputs.beam_search_decoder_output.scores attention_weights = [tf.no_op()] else: predicted_indices = outputs.sample_id topk_values = tf.constant(1, shape=(1, 1), dtype=tf.float32) attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1) (self.eval_predicted_indices_op, self.eval_topk_values, _, _) = (predicted_indices, topk_values, target_index, attention_weights) </DeepExtract> self.eval_true_target_strings_op = reader_output[reader.TARGET_STRING_KEY] if self.use_multilanguage: self.eval_language_ids_op = reader_output[reader.LANGUAGE_ID] self.saver = tf.train.Saver(max_to_keep=10) if self.config.LOAD_PATH and (not self.config.TRAIN_PATH): <DeepExtract> self.sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer())) </DeepExtract> <DeepExtract> if not self.sess is None: self.saver.restore(self.sess, self.config.LOAD_PATH) print('Done loading model') with open(self.config.LOAD_PATH + '.dict', 'rb') as file: if self.subtoken_to_index is not None: return print('Loading dictionaries from: ' + self.config.LOAD_PATH) 
self.subtoken_to_index = pickle.load(file) self.index_to_subtoken = pickle.load(file) self.subtoken_vocab_size = pickle.load(file) self.target_to_index = pickle.load(file) self.index_to_target = pickle.load(file) self.target_vocab_size = pickle.load(file) self.node_to_index = pickle.load(file) self.index_to_node = pickle.load(file) self.nodes_vocab_size = pickle.load(file) self.num_training_examples = pickle.load(file) self.epochs_trained = pickle.load(file) saved_config = pickle.load(file) self.config.take_model_hyperparams_from(saved_config) print('Done loading dictionaries') </DeepExtract> if release: release_name = self.config.LOAD_PATH + '.release' print('Releasing model, output model: %s' % release_name) self.saver.save(self.sess, release_name) shutil.copyfile(src=self.config.LOAD_PATH + '.dict', dst=release_name + '.dict') return None model_dirname = os.path.dirname(self.config.SAVE_PATH if self.config.SAVE_PATH else self.config.LOAD_PATH) ref_file_name = model_dirname + '/ref.txt' predicted_file_name = model_dirname + '/pred.txt' if not os.path.exists(model_dirname): os.makedirs(model_dirname) lang_file = None if self.use_multilanguage: language_mapping_file_name = model_dirname + '/lang.txt' lang_file = open(language_mapping_file_name, 'w') with open(model_dirname + '/log.txt', 'w') as output_file, open(ref_file_name, 'w') as ref_file, open(predicted_file_name, 'w') as pred_file: num_correct_predictions = 0 if self.config.BEAM_WIDTH == 0 else np.zeros([self.config.BEAM_WIDTH], dtype=np.int32) total_predictions = 0 total_prediction_batches = 0 (true_positive, false_positive, false_negative) = (0, 0, 0) self.eval_queue.reset(self.sess) start_time = time.time() try: while True: if self.use_multilanguage: (predicted_indices, true_target_strings, top_values, language_ids) = self.sess.run([self.eval_predicted_indices_op, self.eval_true_target_strings_op, self.eval_topk_values, self.eval_language_ids_op]) lang_file.write('\n'.join([self.languages[lang_id] for lang_id in language_ids]) + '\n') else: (predicted_indices, true_target_strings, top_values) = self.sess.run([self.eval_predicted_indices_op, self.eval_true_target_strings_op, self.eval_topk_values]) true_target_strings = Common.binary_to_string_list(true_target_strings) ref_file.write('\n'.join([name.replace(Common.internal_delimiter, ' ') for name in true_target_strings]) + '\n') if self.config.BEAM_WIDTH > 0: predicted_strings = [[[self.index_to_target[i] for i in timestep] for timestep in example] for example in predicted_indices] predicted_strings = [list(map(list, zip(*example))) for example in predicted_strings] pred_file.write('\n'.join([' '.join(Common.filter_impossible_names(words)) for words in predicted_strings[0]]) + '\n') else: predicted_strings = [[self.index_to_target[i] for i in example] for example in predicted_indices] pred_file.write('\n'.join([' '.join(Common.filter_impossible_names(words)) for words in predicted_strings]) + '\n') <DeepExtract> for (original_name, predicted) in zip(true_target_strings, predicted_strings): original_name_parts = original_name.split(Common.internal_delimiter) filtered_original = Common.filter_impossible_names(original_name_parts) predicted_first = predicted if self.config.BEAM_WIDTH > 0: predicted_first = predicted[0] filtered_predicted_first_parts = Common.filter_impossible_names(predicted_first) if self.config.BEAM_WIDTH == 0: output_file.write('Original: ' + Common.internal_delimiter.join(original_name_parts) + ' , predicted 1st: ' + 
Common.internal_delimiter.join(filtered_predicted_first_parts) + '\n') if filtered_original == filtered_predicted_first_parts or Common.unique(filtered_original) == Common.unique(filtered_predicted_first_parts) or ''.join(filtered_original) == ''.join(filtered_predicted_first_parts): num_correct_predictions += 1 else: filtered_predicted = [Common.internal_delimiter.join(Common.filter_impossible_names(p)) for p in predicted] true_ref = original_name output_file.write('Original: ' + ' '.join(original_name_parts) + '\n') for (i, p) in enumerate(filtered_predicted): output_file.write('\t@{}: {}'.format(i + 1, ' '.join(p.split(Common.internal_delimiter))) + '\n') if true_ref in filtered_predicted: index_of_correct = filtered_predicted.index(true_ref) update = np.concatenate([np.zeros(index_of_correct, dtype=np.int32), np.ones(self.config.BEAM_WIDTH - index_of_correct, dtype=np.int32)]) num_correct_predictions += update num_correct_predictions = num_correct_predictions </DeepExtract> <DeepExtract> for (original_name, predicted) in zip(true_target_strings, predicted_strings): if self.config.BEAM_WIDTH > 0: predicted = predicted[0] filtered_predicted_names = Common.filter_impossible_names(predicted) filtered_original_subtokens = Common.filter_impossible_names(original_name.split(Common.internal_delimiter)) if all([st == Common.PAD for st in predicted]) and all([st == Common.PAD for st in original_name]): true_positive += 1 continue if ''.join(filtered_original_subtokens) == ''.join(filtered_predicted_names): true_positive += len(filtered_original_subtokens) continue for subtok in filtered_predicted_names: if subtok in filtered_original_subtokens: true_positive += 1 else: false_positive += 1 for subtok in filtered_original_subtokens: if not subtok in filtered_predicted_names: false_negative += 1 (true_positive, false_positive, false_negative) = (true_positive, false_positive, false_negative) </DeepExtract> total_predictions += len(true_target_strings) total_prediction_batches += 1 if total_prediction_batches % self.num_batches_to_log == 0: elapsed = time.time() - start_time <DeepExtract> accuracy_message = str(num_correct_predictions / total_predictions) throughput_message = 'Prediction throughput: %d' % int(total_predictions / (elapsed if elapsed > 0 else 1)) output_file.write(accuracy_message + '\n') output_file.write(throughput_message) print(throughput_message) </DeepExtract> except tf.errors.OutOfRangeError: pass print('Done testing, epoch reached') output_file.write(str(num_correct_predictions / total_predictions) + '\n') elapsed = int(time.time() - eval_start_time) <DeepExtract> if true_positive + false_positive > 0: precision = true_positive / (true_positive + false_positive) else: precision = 0 if true_positive + false_negative > 0: recall = true_positive / (true_positive + false_negative) else: recall = 0 if precision + recall > 0: f1 = 2 * precision * recall / (precision + recall) else: f1 = 0 (precision, recall, f1) = (precision, recall, f1) </DeepExtract> files_rouge = FilesRouge() rouge = files_rouge.get_scores(hyp_path=predicted_file_name, ref_path=ref_file_name, avg=True, ignore_empty=True) print('Evaluation time: %sh%sm%ss' % (elapsed // 60 // 60, elapsed // 60 % 60, elapsed % 60)) return (num_correct_predictions / total_predictions, precision, recall, f1, rouge)
def evaluate(self, release=False): eval_start_time = time.time() if self.eval_queue is None: self.eval_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index, node_to_index=self.node_to_index, target_to_index=self.target_to_index, config=self.config, is_evaluating=True, num_training_samples=self.num_training_examples) reader_output = self.eval_queue.get_output() target_index = reader_output[reader.TARGET_INDEX_KEY] path_source_indices = reader_output[reader.PATH_SOURCE_INDICES_KEY] node_indices = reader_output[reader.NODE_INDICES_KEY] path_target_indices = reader_output[reader.PATH_TARGET_INDICES_KEY] valid_mask = reader_output[reader.VALID_CONTEXT_MASK_KEY] path_source_lengths = reader_output[reader.PATH_SOURCE_LENGTHS_KEY] path_lengths = reader_output[reader.PATH_LENGTHS_KEY] path_target_lengths = reader_output[reader.PATH_TARGET_LENGTHS_KEY] language_ids = reader_output[reader.LANGUAGE_ID] with tf.variable_scope('model', reuse=self.get_should_reuse_variables()): subtoken_vocab = tf.get_variable('SUBTOKENS_VOCAB', shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB', shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) nodes_vocab = tf.get_variable('NODES_VOCAB', shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE), dtype=tf.float32, trainable=False) if self.use_multilanguage: language_embedding = tf.get_variable('LANGUAGE_EMBEDDING', shape=(len(self.languages), self.config.EMBEDDINGS_SIZE), dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_OUT', uniform=True)) batched_contexts = self.compute_contexts(subtoken_vocab=subtoken_vocab, nodes_vocab=nodes_vocab, source_input=path_source_indices, nodes_input=node_indices, target_input=path_target_indices, valid_mask=valid_mask, path_source_lengths=path_source_lengths, path_lengths=path_lengths, path_target_lengths=path_target_lengths, is_evaluating=True, language_embedding=language_embedding, language_ids=language_ids) (outputs, final_states) = self.decode_outputs(target_words_vocab=target_words_vocab, target_input=target_index, batch_size=tf.shape(target_index)[0], batched_contexts=batched_contexts, valid_mask=valid_mask, is_evaluating=True) if self.config.BEAM_WIDTH > 0: predicted_indices = outputs.predicted_ids topk_values = outputs.beam_search_decoder_output.scores attention_weights = [tf.no_op()] else: predicted_indices = outputs.sample_id topk_values = tf.constant(1, shape=(1, 1), dtype=tf.float32) attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1) (self.eval_predicted_indices_op, self.eval_topk_values, _, _) = (predicted_indices, topk_values, target_index, attention_weights) self.eval_true_target_strings_op = reader_output[reader.TARGET_STRING_KEY] if self.use_multilanguage: self.eval_language_ids_op = reader_output[reader.LANGUAGE_ID] self.saver = tf.train.Saver(max_to_keep=10) if self.config.LOAD_PATH and (not self.config.TRAIN_PATH): self.sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer())) if not self.sess is None: self.saver.restore(self.sess, self.config.LOAD_PATH) print('Done loading model') with open(self.config.LOAD_PATH + '.dict', 'rb') as file: if self.subtoken_to_index is not None: return print('Loading dictionaries from: ' + self.config.LOAD_PATH) self.subtoken_to_index = pickle.load(file) self.index_to_subtoken = pickle.load(file) 
self.subtoken_vocab_size = pickle.load(file) self.target_to_index = pickle.load(file) self.index_to_target = pickle.load(file) self.target_vocab_size = pickle.load(file) self.node_to_index = pickle.load(file) self.index_to_node = pickle.load(file) self.nodes_vocab_size = pickle.load(file) self.num_training_examples = pickle.load(file) self.epochs_trained = pickle.load(file) saved_config = pickle.load(file) self.config.take_model_hyperparams_from(saved_config) print('Done loading dictionaries') if release: release_name = self.config.LOAD_PATH + '.release' print('Releasing model, output model: %s' % release_name) self.saver.save(self.sess, release_name) shutil.copyfile(src=self.config.LOAD_PATH + '.dict', dst=release_name + '.dict') return None model_dirname = os.path.dirname(self.config.SAVE_PATH if self.config.SAVE_PATH else self.config.LOAD_PATH) ref_file_name = model_dirname + '/ref.txt' predicted_file_name = model_dirname + '/pred.txt' if not os.path.exists(model_dirname): os.makedirs(model_dirname) lang_file = None if self.use_multilanguage: language_mapping_file_name = model_dirname + '/lang.txt' lang_file = open(language_mapping_file_name, 'w') with open(model_dirname + '/log.txt', 'w') as output_file, open(ref_file_name, 'w') as ref_file, open(predicted_file_name, 'w') as pred_file: num_correct_predictions = 0 if self.config.BEAM_WIDTH == 0 else np.zeros([self.config.BEAM_WIDTH], dtype=np.int32) total_predictions = 0 total_prediction_batches = 0 (true_positive, false_positive, false_negative) = (0, 0, 0) self.eval_queue.reset(self.sess) start_time = time.time() try: while True: if self.use_multilanguage: (predicted_indices, true_target_strings, top_values, language_ids) = self.sess.run([self.eval_predicted_indices_op, self.eval_true_target_strings_op, self.eval_topk_values, self.eval_language_ids_op]) lang_file.write('\n'.join([self.languages[lang_id] for lang_id in language_ids]) + '\n') else: (predicted_indices, true_target_strings, top_values) = self.sess.run([self.eval_predicted_indices_op, self.eval_true_target_strings_op, self.eval_topk_values]) true_target_strings = Common.binary_to_string_list(true_target_strings) ref_file.write('\n'.join([name.replace(Common.internal_delimiter, ' ') for name in true_target_strings]) + '\n') if self.config.BEAM_WIDTH > 0: predicted_strings = [[[self.index_to_target[i] for i in timestep] for timestep in example] for example in predicted_indices] predicted_strings = [list(map(list, zip(*example))) for example in predicted_strings] pred_file.write('\n'.join([' '.join(Common.filter_impossible_names(words)) for words in predicted_strings[0]]) + '\n') else: predicted_strings = [[self.index_to_target[i] for i in example] for example in predicted_indices] pred_file.write('\n'.join([' '.join(Common.filter_impossible_names(words)) for words in predicted_strings]) + '\n') for (original_name, predicted) in zip(true_target_strings, predicted_strings): original_name_parts = original_name.split(Common.internal_delimiter) filtered_original = Common.filter_impossible_names(original_name_parts) predicted_first = predicted if self.config.BEAM_WIDTH > 0: predicted_first = predicted[0] filtered_predicted_first_parts = Common.filter_impossible_names(predicted_first) if self.config.BEAM_WIDTH == 0: output_file.write('Original: ' + Common.internal_delimiter.join(original_name_parts) + ' , predicted 1st: ' + Common.internal_delimiter.join(filtered_predicted_first_parts) + '\n') if filtered_original == filtered_predicted_first_parts or 
Common.unique(filtered_original) == Common.unique(filtered_predicted_first_parts) or ''.join(filtered_original) == ''.join(filtered_predicted_first_parts): num_correct_predictions += 1 else: filtered_predicted = [Common.internal_delimiter.join(Common.filter_impossible_names(p)) for p in predicted] true_ref = original_name output_file.write('Original: ' + ' '.join(original_name_parts) + '\n') for (i, p) in enumerate(filtered_predicted): output_file.write('\t@{}: {}'.format(i + 1, ' '.join(p.split(Common.internal_delimiter))) + '\n') if true_ref in filtered_predicted: index_of_correct = filtered_predicted.index(true_ref) update = np.concatenate([np.zeros(index_of_correct, dtype=np.int32), np.ones(self.config.BEAM_WIDTH - index_of_correct, dtype=np.int32)]) num_correct_predictions += update num_correct_predictions = num_correct_predictions for (original_name, predicted) in zip(true_target_strings, predicted_strings): if self.config.BEAM_WIDTH > 0: predicted = predicted[0] filtered_predicted_names = Common.filter_impossible_names(predicted) filtered_original_subtokens = Common.filter_impossible_names(original_name.split(Common.internal_delimiter)) if all([st == Common.PAD for st in predicted]) and all([st == Common.PAD for st in original_name]): true_positive += 1 continue if ''.join(filtered_original_subtokens) == ''.join(filtered_predicted_names): true_positive += len(filtered_original_subtokens) continue for subtok in filtered_predicted_names: if subtok in filtered_original_subtokens: true_positive += 1 else: false_positive += 1 for subtok in filtered_original_subtokens: if not subtok in filtered_predicted_names: false_negative += 1 (true_positive, false_positive, false_negative) = (true_positive, false_positive, false_negative) total_predictions += len(true_target_strings) total_prediction_batches += 1 if total_prediction_batches % self.num_batches_to_log == 0: elapsed = time.time() - start_time accuracy_message = str(num_correct_predictions / total_predictions) throughput_message = 'Prediction throughput: %d' % int(total_predictions / (elapsed if elapsed > 0 else 1)) output_file.write(accuracy_message + '\n') output_file.write(throughput_message) print(throughput_message) except tf.errors.OutOfRangeError: pass print('Done testing, epoch reached') output_file.write(str(num_correct_predictions / total_predictions) + '\n') elapsed = int(time.time() - eval_start_time) if true_positive + false_positive > 0: precision = true_positive / (true_positive + false_positive) else: precision = 0 if true_positive + false_negative > 0: recall = true_positive / (true_positive + false_negative) else: recall = 0 if precision + recall > 0: f1 = 2 * precision * recall / (precision + recall) else: f1 = 0 (precision, recall, f1) = (precision, recall, f1) files_rouge = FilesRouge() rouge = files_rouge.get_scores(hyp_path=predicted_file_name, ref_path=ref_file_name, avg=True, ignore_empty=True) print('Evaluation time: %sh%sm%ss' % (elapsed // 60 // 60, elapsed // 60 % 60, elapsed % 60)) return (num_correct_predictions / total_predictions, precision, recall, f1, rouge)
code-transformer
positive
def setup(bot): <DeepExtract> if not os.path.exists(PATH): print('Creating data/customgcom folder...') os.makedirs(PATH) </DeepExtract> <DeepExtract> if not dataIO.is_valid_json(JSON): print('Creating empty %s' % JSON) default = {'ALIASES': {}, 'COMMANDS': {}, '_CGCOM_VERSION': 2} dataIO.save_json(JSON, default) </DeepExtract> bot.add_cog(CustomGlobalCommands(bot))
def setup(bot): if not os.path.exists(PATH): print('Creating data/customgcom folder...') os.makedirs(PATH) if not dataIO.is_valid_json(JSON): print('Creating empty %s' % JSON) default = {'ALIASES': {}, 'COMMANDS': {}, '_CGCOM_VERSION': 2} dataIO.save_json(JSON, default) bot.add_cog(CustomGlobalCommands(bot))
calebj-cogs
positive
def ensure_igw_absent(vpc_id): <DeepExtract> filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) try: if not gateway_id: igws = describe_igws_with_backoff(self._connection, Filters=filters) else: igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e) igw = None if len(igws) > 1: self._module.fail_json(msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id)) elif igws: igw = camel_dict_to_snake_dict(igws[0]) igw = igw </DeepExtract> if igw is None: return self._results if self._check_mode: self._results['changed'] = True return self._results try: self._results['changed'] = True self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg='Unable to delete Internet Gateway') return self._results
def ensure_igw_absent(vpc_id): filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) try: if not gateway_id: igws = describe_igws_with_backoff(self._connection, Filters=filters) else: igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e) igw = None if len(igws) > 1: self._module.fail_json(msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting'.format(vpc_id)) elif igws: igw = camel_dict_to_snake_dict(igws[0]) igw = igw if igw is None: return self._results if self._check_mode: self._results['changed'] = True return self._results try: self._results['changed'] = True self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id'], VpcId=vpc_id) self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw['internet_gateway_id']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg='Unable to delete Internet Gateway') return self._results
amazon.aws
positive
def _validate_name(self, parent_id, label_name): assert parent_id == labelplus.common.label.ID_NULL or parent_id in self._labels labelplus.common.label.validate_name(label_name) <DeepExtract> assert parent_id == labelplus.common.label.ID_NULL or parent_id in self._labels names = [] for id in self._index[parent_id]['children']: names.append(self._labels[id]['name']) names = names </DeepExtract> if label_name in names: raise LabelPlusError(ERR_LABEL_EXISTS)
def _validate_name(self, parent_id, label_name): assert parent_id == labelplus.common.label.ID_NULL or parent_id in self._labels labelplus.common.label.validate_name(label_name) assert parent_id == labelplus.common.label.ID_NULL or parent_id in self._labels names = [] for id in self._index[parent_id]['children']: names.append(self._labels[id]['name']) names = names if label_name in names: raise LabelPlusError(ERR_LABEL_EXISTS)
deluge-labelplus
positive
def test_join_inner(): <DeepExtract> np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=100) elif dt is bool: s = np.where(np.random.randint(2, size=100), True, False) elif dt is float: s = np.random.rand(100) elif dt is str: r = [f'ssssss{x}' for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([r[x] for x in (float, 40)]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([r[x] for x in (float, 40)]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([rt[x] for x in (float, 40)]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(100, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (int, 40) = pd.DataFrame(data) </DeepExtract> <DeepExtract> np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=80) elif dt is bool: s = np.where(np.random.randint(2, size=80), True, False) elif dt is float: s = np.random.rand(80) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=80) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(80, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (str, 10) = pd.DataFrame(data) </DeepExtract> <DeepExtract> c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n SELECT\n a.*, d, d*c AS x\n FROM a\n INNER JOIN b ON a.a=b.a AND a.b=b.b\n ORDER BY a.a NULLS FIRST, a.b NULLS FIRST, a.c NULLS FIRST, d NULLS FIRST\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n SELECT\n a.*, d, d*c AS x\n FROM a\n INNER JOIN b ON a.a=b.a AND a.b=b.b\n ORDER BY a.a NULLS FIRST, a.b NULLS FIRST, a.c NULLS FIRST, d NULLS FIRST\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index) </DeepExtract>
def test_join_inner(): np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=100) elif dt is bool: s = np.where(np.random.randint(2, size=100), True, False) elif dt is float: s = np.random.rand(100) elif dt is str: r = [f'ssssss{x}' for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([r[x] for x in (float, 40)]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([r[x] for x in (float, 40)]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] (float, 40) = np.random.randint(10, size=100) s = np.array([rt[x] for x in (float, 40)]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(100, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (int, 40) = pd.DataFrame(data) np.random.seed(0) data = {} for (k, v) in kwargs.items(): if not isinstance(v, tuple): v = (v, 0.0) (dt, null_ct) = (v[0], v[1]) if dt is int: s = np.random.randint(10, size=80) elif dt is bool: s = np.where(np.random.randint(2, size=80), True, False) elif dt is float: s = np.random.rand(80) elif dt is str: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) elif dt is pd.StringDtype: r = [f'ssssss{x}' for x in range(10)] c = np.random.randint(10, size=80) s = np.array([r[x] for x in c]) s = pd.array(s, dtype='string') elif dt is datetime: rt = [datetime(2020, 1, 1) + timedelta(days=x) for x in range(10)] c = np.random.randint(10, size=80) s = np.array([rt[x] for x in c]) else: raise NotImplementedError ps = pd.Series(s) if null_ct > 0: idx = np.random.choice(80, null_ct, replace=False).tolist() ps[idx] = None data[k] = ps (str, 10) = pd.DataFrame(data) c = Context() engine = sqlite3.connect(':memory:') for (name, df) in dfs.items(): c.create_table(name, df) df.to_sql(name, engine, index=False) dask_result = c.sql('\n SELECT\n a.*, d, d*c AS x\n FROM a\n INNER JOIN b ON a.a=b.a AND a.b=b.b\n ORDER BY a.a NULLS FIRST, a.b NULLS FIRST, a.c NULLS FIRST, d NULLS FIRST\n ').reset_index(drop=True) sqlite_result = pd.read_sql('\n SELECT\n a.*, d, d*c AS x\n FROM a\n INNER JOIN b ON a.a=b.a AND a.b=b.b\n ORDER BY a.a NULLS FIRST, a.b NULLS FIRST, a.c NULLS FIRST, d NULLS FIRST\n ', engine).reset_index(drop=True) dask_result = cast_datetime_to_string(dask_result) dask_result = dask_result.fillna(np.NaN) sqlite_result = sqlite_result.fillna(np.NaN) assert_eq(dask_result, sqlite_result, check_dtype=False, check_index=check_index)
dask-sql
positive
def __init__(self, annotation_path): """Class for reading and visualizing annotations. Args: annotation_path (str): location of annotation file """ self.logger = logging.getLogger(__name__) self.logger.info('Loading annotations.') <DeepExtract> with open(annotation_path, 'r') as f: self.dataset = json.load(f) </DeepExtract> assert type(self.dataset) == dict, 'Annotation file format {} not supported.'.format(type(self.dataset)) <DeepExtract> self.logger.info('Creating index.') self.img_ann_map = defaultdict(list) self.cat_img_map = defaultdict(list) self.anns = {} self.cats = {} self.imgs = {} for ann in self.dataset['annotations']: self.img_ann_map[ann['image_id']].append(ann) self.anns[ann['id']] = ann for img in self.dataset['images']: self.imgs[img['id']] = img for cat in self.dataset['categories']: self.cats[cat['id']] = cat for ann in self.dataset['annotations']: self.cat_img_map[ann['category_id']].append(ann['image_id']) self.logger.info('Index created.') </DeepExtract>
def __init__(self, annotation_path): """Class for reading and visualizing annotations. Args: annotation_path (str): location of annotation file """ self.logger = logging.getLogger(__name__) self.logger.info('Loading annotations.') with open(annotation_path, 'r') as f: self.dataset = json.load(f) assert type(self.dataset) == dict, 'Annotation file format {} not supported.'.format(type(self.dataset)) self.logger.info('Creating index.') self.img_ann_map = defaultdict(list) self.cat_img_map = defaultdict(list) self.anns = {} self.cats = {} self.imgs = {} for ann in self.dataset['annotations']: self.img_ann_map[ann['image_id']].append(ann) self.anns[ann['id']] = ann for img in self.dataset['images']: self.imgs[img['id']] = img for cat in self.dataset['categories']: self.cats[cat['id']] = cat for ann in self.dataset['annotations']: self.cat_img_map[ann['category_id']].append(ann['image_id']) self.logger.info('Index created.')
ACSL
positive
def _aggregate_answer(server, client, response, referral, qname, rdtype, rdclass): msg = response.message dname_rrsets = [x for x in msg.answer if x.rdtype == dns.rdatatype.DNAME and x.rdclass == rdclass] dname_rrsets.sort(reverse=True) qname_sought = qname try: i = 0 while i < MAX_CNAME_REDIRECTION: synthesized_cname_info = None for dname_rrset in dname_rrsets: if qname_sought.parent().is_subdomain(dname_rrset.name): synthesized_cname_info = RRsetInfo(cname_from_dname(qname_sought, dname_rrset), self.ttl_cmp, RRsetInfo(dname_rrset, self.ttl_cmp)) break try: <DeepExtract> msg = response.message try: rrset = msg.find_rrset(msg.answer, qname_sought, rdclass, rdtype) except KeyError: rrset = msg.find_rrset(msg.answer, qname_sought, rdclass, dns.rdatatype.CNAME) rrset_info = RRsetInfo(rrset, self.ttl_cmp) rrset_info = DNSResponseComponent.insert_into_list(rrset_info, self.answer_info, server, client, response) rrset_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) rrset_info = rrset_info </DeepExtract> if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass and (synthesized_cname_info is not None): synthesized_cname_info = rrset_info.create_or_update_cname_from_dname_info(synthesized_cname_info, server, client, response, rdclass) synthesized_cname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) except KeyError: if synthesized_cname_info is None: raise synthesized_cname_info = DNSResponseComponent.insert_into_list(synthesized_cname_info, self.answer_info, server, client, response) synthesized_cname_info.dname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) rrset_info = synthesized_cname_info if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass: qname_sought = rrset_info.rrset[0].target else: break i += 1 except KeyError: if referral and rdtype != dns.rdatatype.DS: try: rrset = [x for x in msg.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.NS and (x.rdclass == rdclass)][0] except IndexError: pass else: referral_info = ReferralResponse(rrset.name) DNSResponseComponent.insert_into_list(referral_info, self.referral_info, server, client, response) return if qname_sought != qname and (not response.recursion_desired_and_available()): return if msg.rcode() == dns.rcode.NXDOMAIN: neg_response_info_list = self.nxdomain_info else: neg_response_info_list = self.nodata_info neg_response_info = NegativeResponseInfo(qname_sought, rdtype, self.ttl_cmp) neg_response_info = DNSResponseComponent.insert_into_list(neg_response_info, neg_response_info_list, server, client, response) neg_response_info.create_or_update_nsec_info(server, client, response, rdclass, referral) neg_response_info.create_or_update_soa_info(server, client, response, rdclass, referral)
def _aggregate_answer(server, client, response, referral, qname, rdtype, rdclass): msg = response.message dname_rrsets = [x for x in msg.answer if x.rdtype == dns.rdatatype.DNAME and x.rdclass == rdclass] dname_rrsets.sort(reverse=True) qname_sought = qname try: i = 0 while i < MAX_CNAME_REDIRECTION: synthesized_cname_info = None for dname_rrset in dname_rrsets: if qname_sought.parent().is_subdomain(dname_rrset.name): synthesized_cname_info = RRsetInfo(cname_from_dname(qname_sought, dname_rrset), self.ttl_cmp, RRsetInfo(dname_rrset, self.ttl_cmp)) break try: msg = response.message try: rrset = msg.find_rrset(msg.answer, qname_sought, rdclass, rdtype) except KeyError: rrset = msg.find_rrset(msg.answer, qname_sought, rdclass, dns.rdatatype.CNAME) rrset_info = RRsetInfo(rrset, self.ttl_cmp) rrset_info = DNSResponseComponent.insert_into_list(rrset_info, self.answer_info, server, client, response) rrset_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) rrset_info = rrset_info if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass and (synthesized_cname_info is not None): synthesized_cname_info = rrset_info.create_or_update_cname_from_dname_info(synthesized_cname_info, server, client, response, rdclass) synthesized_cname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) except KeyError: if synthesized_cname_info is None: raise synthesized_cname_info = DNSResponseComponent.insert_into_list(synthesized_cname_info, self.answer_info, server, client, response) synthesized_cname_info.dname_info.update_rrsig_info(server, client, response, msg.answer, rdclass, referral) rrset_info = synthesized_cname_info if rrset_info.rrset.rdtype == dns.rdatatype.CNAME and rrset_info.rrset.rdclass == rdclass: qname_sought = rrset_info.rrset[0].target else: break i += 1 except KeyError: if referral and rdtype != dns.rdatatype.DS: try: rrset = [x for x in msg.authority if qname.is_subdomain(x.name) and x.rdtype == dns.rdatatype.NS and (x.rdclass == rdclass)][0] except IndexError: pass else: referral_info = ReferralResponse(rrset.name) DNSResponseComponent.insert_into_list(referral_info, self.referral_info, server, client, response) return if qname_sought != qname and (not response.recursion_desired_and_available()): return if msg.rcode() == dns.rcode.NXDOMAIN: neg_response_info_list = self.nxdomain_info else: neg_response_info_list = self.nodata_info neg_response_info = NegativeResponseInfo(qname_sought, rdtype, self.ttl_cmp) neg_response_info = DNSResponseComponent.insert_into_list(neg_response_info, neg_response_info_list, server, client, response) neg_response_info.create_or_update_nsec_info(server, client, response, rdclass, referral) neg_response_info.create_or_update_soa_info(server, client, response, rdclass, referral)
dnsviz
positive
def define_empty_source_parallel_buckets(max_seq_len_target: int, bucket_width: int=10) -> List[Tuple[int, int]]: """ Returns (source, target) buckets up to (None, max_seq_len_target). The source is empty since it is supposed to not contain data that can be bucketized. The target is used as reference to create the buckets. :param max_seq_len_target: Maximum target bucket size. :param bucket_width: Width of buckets on longer side. """ target_step_size = max(1, bucket_width) <DeepExtract> buckets = [bucket_len for bucket_len in range(target_step_size, max_seq_len_target + target_step_size, target_step_size)] buckets[-1] = max_seq_len_target target_buckets = buckets </DeepExtract> source_buckets = [0 for b in target_buckets] target_buckets = [max(2, b) for b in target_buckets] parallel_buckets = list(zip(source_buckets, target_buckets)) buckets = list(OrderedDict.fromkeys(parallel_buckets)) buckets.sort() return buckets
def define_empty_source_parallel_buckets(max_seq_len_target: int, bucket_width: int=10) -> List[Tuple[int, int]]: """ Returns (source, target) buckets up to (None, max_seq_len_target). The source is empty since it is supposed to not contain data that can be bucketized. The target is used as reference to create the buckets. :param max_seq_len_target: Maximum target bucket size. :param bucket_width: Width of buckets on longer side. """ target_step_size = max(1, bucket_width) buckets = [bucket_len for bucket_len in range(target_step_size, max_seq_len_target + target_step_size, target_step_size)] buckets[-1] = max_seq_len_target target_buckets = buckets source_buckets = [0 for b in target_buckets] target_buckets = [max(2, b) for b in target_buckets] parallel_buckets = list(zip(source_buckets, target_buckets)) buckets = list(OrderedDict.fromkeys(parallel_buckets)) buckets.sort() return buckets
DCGCN
positive
def __init__(self, path, img_size=(1088, 608)): self.cap = cv2.VideoCapture(path) self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS))) self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.width = img_size[0] self.height = img_size[1] self.count = 0 <DeepExtract> (wa, ha) = (float(self.width) / self.vw, float(self.height) / self.vh) a = min(wa, ha) (self.w, self.h) = (int(self.vw * a), int(self.vh * a)) </DeepExtract> print('Lenth of the video: {:d} frames'.format(self.vn))
def __init__(self, path, img_size=(1088, 608)): self.cap = cv2.VideoCapture(path) self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS))) self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) self.width = img_size[0] self.height = img_size[1] self.count = 0 (wa, ha) = (float(self.width) / self.vw, float(self.height) / self.vh) a = min(wa, ha) (self.w, self.h) = (int(self.vw * a), int(self.vh * a)) print('Lenth of the video: {:d} frames'.format(self.vn))
AlphAction
positive
def get_touched_tiles(dataset_bbox, cube_origin, cube_tile_size): """Return a list of tuples (itile, jtile) comprising all tiles footprints that intersect the dataset bounding box""" <DeepExtract> (xyul, xyur, xylr, xyll) = dataset_bbox (xul, yul) = xyul (xur, yur) = xyur (xlr, ylr) = xylr (xll, yll) = xyll (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size xmin = max(xll, xul) xmax = min(xlr, xur) ymin = max(yll, ylr) ymax = min(yul, yur) xmin_index = int(floor((xmin - xorigin) / xsize)) xmax_index = int(floor((xmax - xorigin) / xsize)) ymin_index = int(floor((ymin - yorigin) / ysize)) ymax_index = int(floor((ymax - yorigin) / ysize)) definite_tiles = set([(itile, jtile) for itile in range(xmin_index, xmax_index + 1) for jtile in range(ymin_index, ymax_index + 1)]) xmin = min(xll, xul) xmax = max(xlr, xur) ymin = min(yll, ylr) ymax = max(yul, yur) xmin_index = int(floor((xmin - xorigin) / xsize)) xmax_index = int(floor((xmax - xorigin) / xsize)) ymin_index = int(floor((ymin - yorigin) / ysize)) ymax_index = int(floor((ymax - yorigin) / ysize)) possible_tiles = set([(itile, jtile) for itile in range(xmin_index, xmax_index + 1) for jtile in range(ymin_index, ymax_index + 1)]).difference(definite_tiles) (definite_tiles, possible_tiles) = (definite_tiles, possible_tiles) </DeepExtract> coverage_set = definite_tiles <DeepExtract> (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size keep_list = [] for (itile, jtile) in possible_tiles: intersection_exists = False (x0, y0) = (xorigin + itile * xsize, yorigin + (jtile + 1) * ysize) tile_bbox = [(x0, y0), (x0 + xsize, y0), (x0 + xsize, y0 - ysize), (x0, y0 - ysize)] tile_vtx_number = len(tile_bbox) dset_vtx_number = len(dataset_bbox) for tile_vtx in range(tile_vtx_number): (x1, y1) = tile_bbox[tile_vtx] (x2, y2) = tile_bbox[(tile_vtx + 1) % tile_vtx_number] for dset_vtx in range(dset_vtx_number): (x3, y3) = dataset_bbox[dset_vtx] (x4, y4) = dataset_bbox[(dset_vtx + 1) % dset_vtx_number] xcoords = [x1, x2, x3, x4] ycoords = [y1, y2, y3, y4] intersection_exists = self.check_intersection(xcoords, ycoords) if intersection_exists: keep_list.append((itile, jtile)) break if intersection_exists: break intersected_tiles = set(keep_list) </DeepExtract> coverage_set = coverage_set.union(intersected_tiles) possible_tiles = possible_tiles.difference(intersected_tiles) <DeepExtract> (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size keep_list = [] for (itile, jtile) in possible_tiles: tile_vtx_inside = [] (x0, y0) = (xorigin + itile * xsize, yorigin + (jtile + 1) * ysize) tile_bbox = [(x0, y0), (x0 + xsize, y0), (x0 + xsize, y0 - ysize), (x0, y0 - ysize)] dset_vtx_number = len(dataset_bbox) for (x, y) in tile_bbox: winding_number = 0 for dset_vtx in range(dset_vtx_number): (x1, y1) = dataset_bbox[dset_vtx] (x2, y2) = dataset_bbox[(dset_vtx + 1) % dset_vtx_number] if y >= y1 and y < y2: if (x - x1) * (y2 - y1) > (x2 - x1) * (y - y1): winding_number += 1 elif y <= y1 and y > y2: if (x - x1) * (y2 - y1) < (x2 - x1) * (y - y1): winding_number += 1 tile_vtx_inside.append(winding_number % 2 == 1) if tile_vtx_inside.count(True) == len(tile_bbox): keep_list.append((itile, jtile)) assert tile_vtx_inside.count(True) == 4 or tile_vtx_inside.count(True) == 0, 'Tile partially inside dataset bounding box but hasno intersection' contained_tiles = set(keep_list) </DeepExtract> coverage_set = coverage_set.union(contained_tiles) return coverage_set
def get_touched_tiles(dataset_bbox, cube_origin, cube_tile_size): """Return a list of tuples (itile, jtile) comprising all tiles footprints that intersect the dataset bounding box""" (xyul, xyur, xylr, xyll) = dataset_bbox (xul, yul) = xyul (xur, yur) = xyur (xlr, ylr) = xylr (xll, yll) = xyll (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size xmin = max(xll, xul) xmax = min(xlr, xur) ymin = max(yll, ylr) ymax = min(yul, yur) xmin_index = int(floor((xmin - xorigin) / xsize)) xmax_index = int(floor((xmax - xorigin) / xsize)) ymin_index = int(floor((ymin - yorigin) / ysize)) ymax_index = int(floor((ymax - yorigin) / ysize)) definite_tiles = set([(itile, jtile) for itile in range(xmin_index, xmax_index + 1) for jtile in range(ymin_index, ymax_index + 1)]) xmin = min(xll, xul) xmax = max(xlr, xur) ymin = min(yll, ylr) ymax = max(yul, yur) xmin_index = int(floor((xmin - xorigin) / xsize)) xmax_index = int(floor((xmax - xorigin) / xsize)) ymin_index = int(floor((ymin - yorigin) / ysize)) ymax_index = int(floor((ymax - yorigin) / ysize)) possible_tiles = set([(itile, jtile) for itile in range(xmin_index, xmax_index + 1) for jtile in range(ymin_index, ymax_index + 1)]).difference(definite_tiles) (definite_tiles, possible_tiles) = (definite_tiles, possible_tiles) coverage_set = definite_tiles (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size keep_list = [] for (itile, jtile) in possible_tiles: intersection_exists = False (x0, y0) = (xorigin + itile * xsize, yorigin + (jtile + 1) * ysize) tile_bbox = [(x0, y0), (x0 + xsize, y0), (x0 + xsize, y0 - ysize), (x0, y0 - ysize)] tile_vtx_number = len(tile_bbox) dset_vtx_number = len(dataset_bbox) for tile_vtx in range(tile_vtx_number): (x1, y1) = tile_bbox[tile_vtx] (x2, y2) = tile_bbox[(tile_vtx + 1) % tile_vtx_number] for dset_vtx in range(dset_vtx_number): (x3, y3) = dataset_bbox[dset_vtx] (x4, y4) = dataset_bbox[(dset_vtx + 1) % dset_vtx_number] xcoords = [x1, x2, x3, x4] ycoords = [y1, y2, y3, y4] intersection_exists = self.check_intersection(xcoords, ycoords) if intersection_exists: keep_list.append((itile, jtile)) break if intersection_exists: break intersected_tiles = set(keep_list) coverage_set = coverage_set.union(intersected_tiles) possible_tiles = possible_tiles.difference(intersected_tiles) (xorigin, yorigin) = cube_origin (xsize, ysize) = cube_tile_size keep_list = [] for (itile, jtile) in possible_tiles: tile_vtx_inside = [] (x0, y0) = (xorigin + itile * xsize, yorigin + (jtile + 1) * ysize) tile_bbox = [(x0, y0), (x0 + xsize, y0), (x0 + xsize, y0 - ysize), (x0, y0 - ysize)] dset_vtx_number = len(dataset_bbox) for (x, y) in tile_bbox: winding_number = 0 for dset_vtx in range(dset_vtx_number): (x1, y1) = dataset_bbox[dset_vtx] (x2, y2) = dataset_bbox[(dset_vtx + 1) % dset_vtx_number] if y >= y1 and y < y2: if (x - x1) * (y2 - y1) > (x2 - x1) * (y - y1): winding_number += 1 elif y <= y1 and y > y2: if (x - x1) * (y2 - y1) < (x2 - x1) * (y - y1): winding_number += 1 tile_vtx_inside.append(winding_number % 2 == 1) if tile_vtx_inside.count(True) == len(tile_bbox): keep_list.append((itile, jtile)) assert tile_vtx_inside.count(True) == 4 or tile_vtx_inside.count(True) == 0, 'Tile partially inside dataset bounding box but hasno intersection' contained_tiles = set(keep_list) coverage_set = coverage_set.union(contained_tiles) return coverage_set
agdc
positive
def ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs): def stack_fn(x): <DeepExtract> x = block2(x, 64, conv_shortcut=True, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3): x = block2(x, 64, name='conv2' + '_block' + str(i)) x = block2(x, 64, stride=stride1, name='conv2' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block2(x, 128, conv_shortcut=True, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 8): x = block2(x, 128, name='conv3' + '_block' + str(i)) x = block2(x, 128, stride=stride1, name='conv3' + '_block' + str(8), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block2(x, 256, conv_shortcut=True, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 36): x = block2(x, 256, name='conv4' + '_block' + str(i)) x = block2(x, 256, stride=stride1, name='conv4' + '_block' + str(36), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> <DeepExtract> x = block2(x, 512, conv_shortcut=True, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3): x = block2(x, 512, name='conv5' + '_block' + str(i)) x = block2(x, 512, stride=1, name='conv5' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay) x = x </DeepExtract> return x return ResNet(stack_fn, True, True, 'resnet152v2', include_top, weights, input_tensor, input_shape, pooling, classes, **kwargs)
def ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs): def stack_fn(x): x = block2(x, 64, conv_shortcut=True, name='conv2' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3): x = block2(x, 64, name='conv2' + '_block' + str(i)) x = block2(x, 64, stride=stride1, name='conv2' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay) x = x x = block2(x, 128, conv_shortcut=True, name='conv3' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 8): x = block2(x, 128, name='conv3' + '_block' + str(i)) x = block2(x, 128, stride=stride1, name='conv3' + '_block' + str(8), trainable=trainable, weight_decay=weight_decay) x = x x = block2(x, 256, conv_shortcut=True, name='conv4' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 36): x = block2(x, 256, name='conv4' + '_block' + str(i)) x = block2(x, 256, stride=stride1, name='conv4' + '_block' + str(36), trainable=trainable, weight_decay=weight_decay) x = x x = block2(x, 512, conv_shortcut=True, name='conv5' + '_block1', trainable=trainable, weight_decay=weight_decay) for i in range(2, 3): x = block2(x, 512, name='conv5' + '_block' + str(i)) x = block2(x, 512, stride=1, name='conv5' + '_block' + str(3), trainable=trainable, weight_decay=weight_decay) x = x return x return ResNet(stack_fn, True, True, 'resnet152v2', include_top, weights, input_tensor, input_shape, pooling, classes, **kwargs)
deep-learning-models
positive
def save(self, original_obj, obj, autofill_textures): """ Save WoW WMO group data for future export """ print('\nSaving group: <<{}>>'.format(obj.name[:-4])) bpy.context.scene.objects.active = obj mesh = obj.data bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.reveal() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.object.mode_set(mode='OBJECT') if len(obj.modifiers): for modifier in obj.modifiers: bpy.ops.object.modifier_apply(modifier=modifier.name) bpy.ops.object.modifier_add(type='TRIANGULATE') bpy.context.object.modifiers['Triangulate'].quad_method = 'BEAUTY' bpy.ops.object.modifier_remove(modifier='Triangulate') bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.quads_convert_to_tris() bpy.ops.mesh.delete_loose() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.uv.select_all(action='SELECT') bpy.ops.uv.seams_from_islands(mark_seams=False, mark_sharp=True) bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.object.modifier_add(type='EDGE_SPLIT') bpy.context.object.modifiers['EdgeSplit'].use_edge_angle = False bpy.ops.object.modifier_apply(apply_as='DATA', modifier='EdgeSplit') obj.select = True bpy.ops.object.transform_apply(location=True, rotation=True, scale=True) obj.select = False bpy.ops.object.mode_set(mode='EDIT') if obj.WowVertexInfo.BatchTypeA != '': bpy.ops.object.vertex_group_set_active(group=obj.WowVertexInfo.BatchTypeA) bpy.ops.object.vertex_group_select() bpy.ops.mesh.split() bpy.ops.mesh.select_all(action='DESELECT') if obj.WowVertexInfo.BatchTypeB != '': bpy.ops.object.vertex_group_set_active(group=obj.WowVertexInfo.BatchTypeB) bpy.ops.object.vertex_group_select() bpy.ops.mesh.split() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.object.mode_set(mode='OBJECT') mesh.use_auto_smooth = True if mesh.has_custom_normals: mesh.calc_normals_split() else: bpy.ops.mesh.customdata_custom_splitnormals_add() original_obj.data.calc_normals_split() obj.select = True bpy.context.scene.objects.active = original_obj bpy.ops.object.data_transfer(data_type='CUSTOM_NORMAL') obj.select = False bpy.context.scene.objects.active = obj mesh.calc_normals_split() if len(mesh.vertices) > 65535: print('\nObject <<{}>> contains more vertices ({}) than it is supported. \nMaximum amount of vertices you can use per one object is 65535.'.format(obj.name, len(mesh.vertices))) if len(self.root.momt.Materials) > 256: print('\nScene has exceeded the maximum allowed number of WoW materials (255). Your scene now has {} materials. 
So, {} extra ones.'.format(len(self.root.momt.Materials), len(self.root.momt.Materials) - 256)) self.mver.Version = 17 material_indices = {} if autofill_textures: obj.select = True bpy.ops.scene.wow_fill_textures() obj.select = False for i in range(len(mesh.materials)): material_indices[i] = self.root.add_material(mesh.materials[i]) poly_batch_map = {} vg_batch_a = None vg_batch_b = None vg_collision = None vg_lightmap = None vg_blendmap = None uv_second_uv = None if obj.WowVertexInfo.BatchTypeA != '': vg_batch_a = obj.vertex_groups.get(obj.WowVertexInfo.BatchTypeA) else: vg_batch_a = obj.vertex_groups.new('BatchMapA') if obj.WowVertexInfo.BatchTypeB != '': vg_batch_b = obj.vertex_groups.get(obj.WowVertexInfo.BatchTypeB) else: vg_batch_b = obj.vertex_groups.new('BatchMapB') if obj.WowVertexInfo.VertexGroup != '': vg_collision = obj.vertex_groups.get(obj.WowVertexInfo.VertexGroup) if obj.WowVertexInfo.Lightmap != '': vg_lightmap = obj.vertex_groups.get(obj.WowVertexInfo.Lightmap) if obj.WowVertexInfo.Blendmap != '': vg_blendmap = obj.vertex_groups.get(obj.WowVertexInfo.Blendmap) self.mogp.Flags |= MOGP_FLAG.HasTwoMOCV self.root.mohd.Flags |= 1 if obj.WowVertexInfo.SecondUV != '': uv_second_uv = obj.data.uv_textures.get(obj.WowVertexInfo.SecondUV) self.mogp.Flags |= MOGP_FLAG.HasTwoMOTV for poly in mesh.polygons: poly_batch_map.setdefault((material_indices.get(poly.material_index), self.get_batch_type(poly, mesh, vg_batch_a.index, vg_batch_b.index)), []).append(poly.index) vertex_size = len(mesh.vertices) n_batches_a = 0 n_batches_b = 0 n_batches_c = 0 for (batch_key, poly_batch) in poly_batch_map.items(): if batch_key[0] != 255: if batch_key[1] == 0: n_batches_a += 1 elif batch_key[1] == 1: n_batches_b += 1 else: n_batches_c += 1 self.moba.Batches = (n_batches_a + n_batches_b + n_batches_c) * [Batch()] self.movt.Vertices = vertex_size * [(0, 0, 0)] self.monr.Normals = vertex_size * [(0, 0, 0)] self.motv.TexCoords = vertex_size * [(0, 0)] self.motv2.TexCoords = vertex_size * [(0, 0)] self.mocv.vertColors = vertex_size * [(127, 127, 127, 0)] self.mocv2.vertColors = vertex_size * [(127, 127, 127, 0)] vertex_map = {} normal_map = {} new_index_last = 0 batch_counter_a = 0 batch_counter_b = 0 batch_counter_c = 0 for (batch_key, poly_batch) in poly_batch_map.items(): first_index = len(self.movi.Indices) sentry_indices = [65535, 0] for poly in poly_batch: collision_counter = 0 for vertex_index in mesh.polygons[poly].vertices: new_index_current = vertex_map.get(vertex_index) if new_index_current is None: vertex_map[vertex_index] = new_index_last new_index_current = new_index_last new_index_last += 1 self.movt.Vertices[new_index_current] = tuple(mesh.vertices[vertex_index].co) sentry_indices[0] = min(sentry_indices[0], new_index_current) sentry_indices[1] = max(sentry_indices[1], new_index_current) self.movi.Indices.append(new_index_current) if vg_collision is not None: for group_info in mesh.vertices[vertex_index].groups: if group_info.group == vg_collision.index: collision_counter += 1 tri_mat = TriangleMaterial() tri_mat.MaterialID = batch_key[0] tri_mat.Flags = 8 if tri_mat.MaterialID == 255 else 32 tri_mat.Flags |= 64 if collision_counter == len(mesh.polygons[poly].vertices) else 4 | 8 for loop_index in mesh.polygons[poly].loop_indices: new_index = vertex_map.get(mesh.loops[loop_index].vertex_index) vertex = mesh.vertices[mesh.loops[loop_index].vertex_index] if len(mesh.uv_layers) > 0: self.motv.TexCoords[new_index] = (mesh.uv_layers.active.data[loop_index].uv[0], 1.0 - 
mesh.uv_layers.active.data[loop_index].uv[1]) if uv_second_uv: self.motv2.TexCoords[new_index] = (mesh.uv_layers[uv_second_uv.name].data[loop_index].uv[0], 1.0 - mesh.uv_layers[uv_second_uv.name].data[loop_index].uv[1]) if '0' in obj.WowWMOGroup.Flags or (obj.WowWMOGroup.PlaceType == '8192' and '1' not in obj.WowWMOGroup.Flags): if len(mesh.vertex_colors): vertex_color = [127, 127, 127, 0] for i in range(0, 3): vertex_color[i] = round(mesh.vertex_colors.active.data[loop_index].color[3 - i - 1] * 255) if vg_lightmap: for vertex_group_element in vertex.groups: if vertex_group_element.group == vg_lightmap.index: weight = round(vertex_group_element.weight * 255) vertex_color[3] = weight if weight > 0 else 0 if weight > 0: tri_mat.Flags |= 1 self.mocv.vertColors[new_index] = vertex_color elif batch_key == 2: self.mocv.vertColors[new_index] = [127, 127, 127, 0] else: self.mocv.vertColors[new_index] = [127, 127, 127, 255] if vg_blendmap is not None: for vertex_group_element in vertex.groups: if vertex_group_element.group == vg_blendmap.index: try: weight = round(vertex.groups[vg_blendmap.index].weight * 255) except: weight = 1 self.mocv2.vertColors[new_index] = (0, 0, 0, weight if weight > 0 else 0) normal_map.setdefault(new_index, []).append(tuple(mesh.loops[loop_index].normal)) self.mopy.TriangleMaterials.append(tri_mat) n_indices = len(self.movi.Indices) - first_index bounding_box = [32767, 32767, 32767, -32768, -32768, -32768] for poly in poly_batch: for vertex_index in mesh.polygons[poly].vertices: new_index = vertex_map.get(vertex_index) <DeepExtract> normal = [0.0, 0.0, 0.0] for n in normal_map.get(new_index): for i in range(0, 3): normal[i] += n[i] for i in range(0, 3): normal[i] /= len(normal_map.get(new_index)) self.monr.Normals[new_index] = normal </DeepExtract> for i in range(0, 2): for j in range(0, 3): idx = i * 3 + j bounding_box[idx] = min(bounding_box[idx], floor(self.movt.Vertices[new_index][j])) if i == 0 else max(bounding_box[idx], ceil(self.movt.Vertices[new_index][j])) if batch_key[0] == 255: continue batch = Batch() batch.BoundingBox = bounding_box batch.StartTriangle = first_index batch.nTriangle = n_indices batch.StartVertex = sentry_indices[0] batch.LastVertex = sentry_indices[1] batch.MaterialID = batch_key[0] if batch_key[1] == 0: self.moba.Batches[batch_counter_a] = batch batch_counter_a += 1 elif batch_key[1] == 1: self.moba.Batches[n_batches_a + batch_counter_b] = batch batch_counter_b += 1 else: self.moba.Batches[n_batches_a + n_batches_b + batch_counter_c] = batch batch_counter_c += 1 self.mogp.BoundingBoxCorner1 = [32767, 32767, 32767] self.mogp.BoundingBoxCorner2 = [-32768, -32768, -32768] for vtx in self.movt.Vertices: for i in range(0, 3): self.mogp.BoundingBoxCorner1[i] = min(self.mogp.BoundingBoxCorner1[i], floor(vtx[i])) self.mogp.BoundingBoxCorner2[i] = max(self.mogp.BoundingBoxCorner2[i], ceil(vtx[i])) self.mogp.Flags |= MOGP_FLAG.HasCollision if '0' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.HasVertexColor if '4' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.HasSkybox if '1' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.DoNotUseLocalLighting if '2' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.AlwaysDraw if '3' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.IsMountAllowed self.mogp.Flags |= int(obj.WowWMOGroup.PlaceType) hasLights = False fogs = (obj.WowWMOGroup.Fog1, obj.WowWMOGroup.Fog2, obj.WowWMOGroup.Fog3, obj.WowWMOGroup.Fog4) lamps = obj.WowWMOGroup.Relations.Lights objects = bpy.context.scene.objects 
self.mogp.FogIndices = (objects[fogs[0]].WowFog.FogID if fogs[0] else 0, objects[fogs[1]].WowFog.FogID if fogs[0] else 0, objects[fogs[2]].WowFog.FogID if fogs[0] else 0, objects[fogs[3]].WowFog.FogID if fogs[0] else 0) if lamps: hasLights = True for lamp in lamps: self.molr.LightRefs.append(lamp.id) self.mogp.nBatchesA = n_batches_a self.mogp.nBatchesB = n_batches_b self.mogp.nBatchesC = n_batches_c self.mogp.nBatchesD = 0 self.mogp.GroupID = int(obj.WowWMOGroup.GroupDBCid) self.mogp.Unknown1 = 0 self.mogp.Unknown2 = 0 group_info = self.root.add_group_info(self.mogp.Flags, [self.mogp.BoundingBoxCorner1, self.mogp.BoundingBoxCorner2], original_obj.name, obj.WowWMOGroup.GroupDesc) self.mogp.GroupNameOfs = group_info[0] self.mogp.DescGroupNameOfs = group_info[1] if len(obj.WowWMOGroup.MODR): for doodad in obj.WowWMOGroup.MODR: self.modr.DoodadRefs.append(doodad.value) self.mogp.Flags |= MOGP_FLAG.HasDoodads elif obj.WowWMOGroup.Relations.Doodads: for doodad in obj.WowWMOGroup.Relations.Doodads: self.modr.DoodadRefs.append(doodad.id) self.mogp.Flags |= MOGP_FLAG.HasDoodads else: self.modr = None bsp_tree = BSPTree() bsp_tree.GenerateBSP(self.movt.Vertices, self.movi.Indices, obj.WowVertexInfo.NodeSize) self.mobn.Nodes = bsp_tree.Nodes self.mobr.Faces = bsp_tree.Faces if '0' not in obj.WowWMOGroup.Flags: if obj.WowWMOGroup.PlaceType == '8192': if '1' in obj.WowWMOGroup.Flags and (not len(mesh.vertex_colors)): self.mocv = None else: self.mogp.Flags |= MOGP_FLAG.HasVertexColor else: self.mocv = None if not self.mogp.Flags & MOGP_FLAG.HasWater: self.mliq = None self.mogp.Flags |= MOGP_FLAG.IsNotOcean self.root.mohd.Flags |= 4 self.mogp.LiquidType = int(obj.WowWMOGroup.LiquidType) if not hasLights: self.molr = None else: self.mogp.Flags |= MOGP_FLAG.HasLight if uv_second_uv is None: self.motv2 = None if vg_blendmap is None: self.mocv2 = None print('\nDone saving group: <<{}>>'.format(obj.name[:-4]))
def save(self, original_obj, obj, autofill_textures): """ Save WoW WMO group data for future export """ print('\nSaving group: <<{}>>'.format(obj.name[:-4])) bpy.context.scene.objects.active = obj mesh = obj.data bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.reveal() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.object.mode_set(mode='OBJECT') if len(obj.modifiers): for modifier in obj.modifiers: bpy.ops.object.modifier_apply(modifier=modifier.name) bpy.ops.object.modifier_add(type='TRIANGULATE') bpy.context.object.modifiers['Triangulate'].quad_method = 'BEAUTY' bpy.ops.object.modifier_remove(modifier='Triangulate') bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.select_all(action='SELECT') bpy.ops.mesh.quads_convert_to_tris() bpy.ops.mesh.delete_loose() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.uv.select_all(action='SELECT') bpy.ops.uv.seams_from_islands(mark_seams=False, mark_sharp=True) bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.object.modifier_add(type='EDGE_SPLIT') bpy.context.object.modifiers['EdgeSplit'].use_edge_angle = False bpy.ops.object.modifier_apply(apply_as='DATA', modifier='EdgeSplit') obj.select = True bpy.ops.object.transform_apply(location=True, rotation=True, scale=True) obj.select = False bpy.ops.object.mode_set(mode='EDIT') if obj.WowVertexInfo.BatchTypeA != '': bpy.ops.object.vertex_group_set_active(group=obj.WowVertexInfo.BatchTypeA) bpy.ops.object.vertex_group_select() bpy.ops.mesh.split() bpy.ops.mesh.select_all(action='DESELECT') if obj.WowVertexInfo.BatchTypeB != '': bpy.ops.object.vertex_group_set_active(group=obj.WowVertexInfo.BatchTypeB) bpy.ops.object.vertex_group_select() bpy.ops.mesh.split() bpy.ops.mesh.select_all(action='DESELECT') bpy.ops.object.mode_set(mode='OBJECT') mesh.use_auto_smooth = True if mesh.has_custom_normals: mesh.calc_normals_split() else: bpy.ops.mesh.customdata_custom_splitnormals_add() original_obj.data.calc_normals_split() obj.select = True bpy.context.scene.objects.active = original_obj bpy.ops.object.data_transfer(data_type='CUSTOM_NORMAL') obj.select = False bpy.context.scene.objects.active = obj mesh.calc_normals_split() if len(mesh.vertices) > 65535: print('\nObject <<{}>> contains more vertices ({}) than it is supported. \nMaximum amount of vertices you can use per one object is 65535.'.format(obj.name, len(mesh.vertices))) if len(self.root.momt.Materials) > 256: print('\nScene has exceeded the maximum allowed number of WoW materials (255). Your scene now has {} materials. 
So, {} extra ones.'.format(len(self.root.momt.Materials), len(self.root.momt.Materials) - 256)) self.mver.Version = 17 material_indices = {} if autofill_textures: obj.select = True bpy.ops.scene.wow_fill_textures() obj.select = False for i in range(len(mesh.materials)): material_indices[i] = self.root.add_material(mesh.materials[i]) poly_batch_map = {} vg_batch_a = None vg_batch_b = None vg_collision = None vg_lightmap = None vg_blendmap = None uv_second_uv = None if obj.WowVertexInfo.BatchTypeA != '': vg_batch_a = obj.vertex_groups.get(obj.WowVertexInfo.BatchTypeA) else: vg_batch_a = obj.vertex_groups.new('BatchMapA') if obj.WowVertexInfo.BatchTypeB != '': vg_batch_b = obj.vertex_groups.get(obj.WowVertexInfo.BatchTypeB) else: vg_batch_b = obj.vertex_groups.new('BatchMapB') if obj.WowVertexInfo.VertexGroup != '': vg_collision = obj.vertex_groups.get(obj.WowVertexInfo.VertexGroup) if obj.WowVertexInfo.Lightmap != '': vg_lightmap = obj.vertex_groups.get(obj.WowVertexInfo.Lightmap) if obj.WowVertexInfo.Blendmap != '': vg_blendmap = obj.vertex_groups.get(obj.WowVertexInfo.Blendmap) self.mogp.Flags |= MOGP_FLAG.HasTwoMOCV self.root.mohd.Flags |= 1 if obj.WowVertexInfo.SecondUV != '': uv_second_uv = obj.data.uv_textures.get(obj.WowVertexInfo.SecondUV) self.mogp.Flags |= MOGP_FLAG.HasTwoMOTV for poly in mesh.polygons: poly_batch_map.setdefault((material_indices.get(poly.material_index), self.get_batch_type(poly, mesh, vg_batch_a.index, vg_batch_b.index)), []).append(poly.index) vertex_size = len(mesh.vertices) n_batches_a = 0 n_batches_b = 0 n_batches_c = 0 for (batch_key, poly_batch) in poly_batch_map.items(): if batch_key[0] != 255: if batch_key[1] == 0: n_batches_a += 1 elif batch_key[1] == 1: n_batches_b += 1 else: n_batches_c += 1 self.moba.Batches = (n_batches_a + n_batches_b + n_batches_c) * [Batch()] self.movt.Vertices = vertex_size * [(0, 0, 0)] self.monr.Normals = vertex_size * [(0, 0, 0)] self.motv.TexCoords = vertex_size * [(0, 0)] self.motv2.TexCoords = vertex_size * [(0, 0)] self.mocv.vertColors = vertex_size * [(127, 127, 127, 0)] self.mocv2.vertColors = vertex_size * [(127, 127, 127, 0)] vertex_map = {} normal_map = {} new_index_last = 0 batch_counter_a = 0 batch_counter_b = 0 batch_counter_c = 0 for (batch_key, poly_batch) in poly_batch_map.items(): first_index = len(self.movi.Indices) sentry_indices = [65535, 0] for poly in poly_batch: collision_counter = 0 for vertex_index in mesh.polygons[poly].vertices: new_index_current = vertex_map.get(vertex_index) if new_index_current is None: vertex_map[vertex_index] = new_index_last new_index_current = new_index_last new_index_last += 1 self.movt.Vertices[new_index_current] = tuple(mesh.vertices[vertex_index].co) sentry_indices[0] = min(sentry_indices[0], new_index_current) sentry_indices[1] = max(sentry_indices[1], new_index_current) self.movi.Indices.append(new_index_current) if vg_collision is not None: for group_info in mesh.vertices[vertex_index].groups: if group_info.group == vg_collision.index: collision_counter += 1 tri_mat = TriangleMaterial() tri_mat.MaterialID = batch_key[0] tri_mat.Flags = 8 if tri_mat.MaterialID == 255 else 32 tri_mat.Flags |= 64 if collision_counter == len(mesh.polygons[poly].vertices) else 4 | 8 for loop_index in mesh.polygons[poly].loop_indices: new_index = vertex_map.get(mesh.loops[loop_index].vertex_index) vertex = mesh.vertices[mesh.loops[loop_index].vertex_index] if len(mesh.uv_layers) > 0: self.motv.TexCoords[new_index] = (mesh.uv_layers.active.data[loop_index].uv[0], 1.0 - 
mesh.uv_layers.active.data[loop_index].uv[1]) if uv_second_uv: self.motv2.TexCoords[new_index] = (mesh.uv_layers[uv_second_uv.name].data[loop_index].uv[0], 1.0 - mesh.uv_layers[uv_second_uv.name].data[loop_index].uv[1]) if '0' in obj.WowWMOGroup.Flags or (obj.WowWMOGroup.PlaceType == '8192' and '1' not in obj.WowWMOGroup.Flags): if len(mesh.vertex_colors): vertex_color = [127, 127, 127, 0] for i in range(0, 3): vertex_color[i] = round(mesh.vertex_colors.active.data[loop_index].color[3 - i - 1] * 255) if vg_lightmap: for vertex_group_element in vertex.groups: if vertex_group_element.group == vg_lightmap.index: weight = round(vertex_group_element.weight * 255) vertex_color[3] = weight if weight > 0 else 0 if weight > 0: tri_mat.Flags |= 1 self.mocv.vertColors[new_index] = vertex_color elif batch_key == 2: self.mocv.vertColors[new_index] = [127, 127, 127, 0] else: self.mocv.vertColors[new_index] = [127, 127, 127, 255] if vg_blendmap is not None: for vertex_group_element in vertex.groups: if vertex_group_element.group == vg_blendmap.index: try: weight = round(vertex.groups[vg_blendmap.index].weight * 255) except: weight = 1 self.mocv2.vertColors[new_index] = (0, 0, 0, weight if weight > 0 else 0) normal_map.setdefault(new_index, []).append(tuple(mesh.loops[loop_index].normal)) self.mopy.TriangleMaterials.append(tri_mat) n_indices = len(self.movi.Indices) - first_index bounding_box = [32767, 32767, 32767, -32768, -32768, -32768] for poly in poly_batch: for vertex_index in mesh.polygons[poly].vertices: new_index = vertex_map.get(vertex_index) normal = [0.0, 0.0, 0.0] for n in normal_map.get(new_index): for i in range(0, 3): normal[i] += n[i] for i in range(0, 3): normal[i] /= len(normal_map.get(new_index)) self.monr.Normals[new_index] = normal for i in range(0, 2): for j in range(0, 3): idx = i * 3 + j bounding_box[idx] = min(bounding_box[idx], floor(self.movt.Vertices[new_index][j])) if i == 0 else max(bounding_box[idx], ceil(self.movt.Vertices[new_index][j])) if batch_key[0] == 255: continue batch = Batch() batch.BoundingBox = bounding_box batch.StartTriangle = first_index batch.nTriangle = n_indices batch.StartVertex = sentry_indices[0] batch.LastVertex = sentry_indices[1] batch.MaterialID = batch_key[0] if batch_key[1] == 0: self.moba.Batches[batch_counter_a] = batch batch_counter_a += 1 elif batch_key[1] == 1: self.moba.Batches[n_batches_a + batch_counter_b] = batch batch_counter_b += 1 else: self.moba.Batches[n_batches_a + n_batches_b + batch_counter_c] = batch batch_counter_c += 1 self.mogp.BoundingBoxCorner1 = [32767, 32767, 32767] self.mogp.BoundingBoxCorner2 = [-32768, -32768, -32768] for vtx in self.movt.Vertices: for i in range(0, 3): self.mogp.BoundingBoxCorner1[i] = min(self.mogp.BoundingBoxCorner1[i], floor(vtx[i])) self.mogp.BoundingBoxCorner2[i] = max(self.mogp.BoundingBoxCorner2[i], ceil(vtx[i])) self.mogp.Flags |= MOGP_FLAG.HasCollision if '0' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.HasVertexColor if '4' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.HasSkybox if '1' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.DoNotUseLocalLighting if '2' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.AlwaysDraw if '3' in obj.WowWMOGroup.Flags: self.mogp.Flags |= MOGP_FLAG.IsMountAllowed self.mogp.Flags |= int(obj.WowWMOGroup.PlaceType) hasLights = False fogs = (obj.WowWMOGroup.Fog1, obj.WowWMOGroup.Fog2, obj.WowWMOGroup.Fog3, obj.WowWMOGroup.Fog4) lamps = obj.WowWMOGroup.Relations.Lights objects = bpy.context.scene.objects self.mogp.FogIndices = 
(objects[fogs[0]].WowFog.FogID if fogs[0] else 0, objects[fogs[1]].WowFog.FogID if fogs[0] else 0, objects[fogs[2]].WowFog.FogID if fogs[0] else 0, objects[fogs[3]].WowFog.FogID if fogs[0] else 0) if lamps: hasLights = True for lamp in lamps: self.molr.LightRefs.append(lamp.id) self.mogp.nBatchesA = n_batches_a self.mogp.nBatchesB = n_batches_b self.mogp.nBatchesC = n_batches_c self.mogp.nBatchesD = 0 self.mogp.GroupID = int(obj.WowWMOGroup.GroupDBCid) self.mogp.Unknown1 = 0 self.mogp.Unknown2 = 0 group_info = self.root.add_group_info(self.mogp.Flags, [self.mogp.BoundingBoxCorner1, self.mogp.BoundingBoxCorner2], original_obj.name, obj.WowWMOGroup.GroupDesc) self.mogp.GroupNameOfs = group_info[0] self.mogp.DescGroupNameOfs = group_info[1] if len(obj.WowWMOGroup.MODR): for doodad in obj.WowWMOGroup.MODR: self.modr.DoodadRefs.append(doodad.value) self.mogp.Flags |= MOGP_FLAG.HasDoodads elif obj.WowWMOGroup.Relations.Doodads: for doodad in obj.WowWMOGroup.Relations.Doodads: self.modr.DoodadRefs.append(doodad.id) self.mogp.Flags |= MOGP_FLAG.HasDoodads else: self.modr = None bsp_tree = BSPTree() bsp_tree.GenerateBSP(self.movt.Vertices, self.movi.Indices, obj.WowVertexInfo.NodeSize) self.mobn.Nodes = bsp_tree.Nodes self.mobr.Faces = bsp_tree.Faces if '0' not in obj.WowWMOGroup.Flags: if obj.WowWMOGroup.PlaceType == '8192': if '1' in obj.WowWMOGroup.Flags and (not len(mesh.vertex_colors)): self.mocv = None else: self.mogp.Flags |= MOGP_FLAG.HasVertexColor else: self.mocv = None if not self.mogp.Flags & MOGP_FLAG.HasWater: self.mliq = None self.mogp.Flags |= MOGP_FLAG.IsNotOcean self.root.mohd.Flags |= 4 self.mogp.LiquidType = int(obj.WowWMOGroup.LiquidType) if not hasLights: self.molr = None else: self.mogp.Flags |= MOGP_FLAG.HasLight if uv_second_uv is None: self.motv2 = None if vg_blendmap is None: self.mocv2 = None print('\nDone saving group: <<{}>>'.format(obj.name[:-4]))
Blender-WMO-import-export-scripts
positive
@pytest.mark.parametrize('restrictions, value, expected', [('', 'c', 'b'), ('', 'z', 'y'), ('x', 'y', 'w'), ('qrstu', 'v', 'p'), ('p', 'p', 'o'), ('', 'b', 'a'), ('', '~', 'z')]) def test_get_less_than(restrictions, value, expected): <DeepExtract> cg = CharGenerator() cg.options['min_value'] = 'a' cg.options['max_value'] = 'z' cg.options['restrictions'] = restrictions cg.prepare() cg = cg </DeepExtract> assert cg.get_less_than(value) == expected
@pytest.mark.parametrize('restrictions, value, expected', [('', 'c', 'b'), ('', 'z', 'y'), ('x', 'y', 'w'), ('qrstu', 'v', 'p'), ('p', 'p', 'o'), ('', 'b', 'a'), ('', '~', 'z')]) def test_get_less_than(restrictions, value, expected): cg = CharGenerator() cg.options['min_value'] = 'a' cg.options['max_value'] = 'z' cg.options['restrictions'] = restrictions cg.prepare() cg = cg assert cg.get_less_than(value) == expected
acsploit
positive
@arg.json @arg.project def credits__list(self) -> None: """List claimed credits""" <DeepExtract> if getattr(self.args, 'project', None) and self.args.project: project_name = self.args.project default_project = self.config.get('default_project', '') if raise_if_none and (not default_project): raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.') project_name = default_project </DeepExtract> project_credits = self.client.list_project_credits(project=project_name) layout = [['code', 'remaining_value']] self.print_response(project_credits, json=self.args.json, table_layout=layout)
@arg.json @arg.project def credits__list(self) -> None: """List claimed credits""" if getattr(self.args, 'project', None) and self.args.project: project_name = self.args.project default_project = self.config.get('default_project', '') if raise_if_none and (not default_project): raise argx.UserError('Specify project: use --project in the command line or the default_project item in the config file.') project_name = default_project project_credits = self.client.list_project_credits(project=project_name) layout = [['code', 'remaining_value']] self.print_response(project_credits, json=self.args.json, table_layout=layout)
aiven-client
positive
def sendall(self, content, *args): """ override socket.socket.sendall method to rewrite the header for non-tunneling proxies if needed """ if not self.__httptunnel: <DeepExtract> (host, endpt) = (None, None) hdrs = content.split('\r\n') for hdr in hdrs: if hdr.lower().startswith('host:'): host = hdr elif hdr.lower().startswith('get') or hdr.lower().startswith('post'): endpt = hdr if host and endpt: hdrs.remove(host) hdrs.remove(endpt) host = host.split(' ')[1] endpt = endpt.split(' ') if self.__proxy[4] != None and self.__proxy[5] != None: hdrs.insert(0, self.__getauthheader()) hdrs.insert(0, 'Host: %s' % host) hdrs.insert(0, '%s http://%s%s %s' % (endpt[0], host, endpt[1], endpt[2])) content = '\r\n'.join(hdrs) </DeepExtract> return super(socksocket, self).sendall(content, *args)
def sendall(self, content, *args): """ override socket.socket.sendall method to rewrite the header for non-tunneling proxies if needed """ if not self.__httptunnel: (host, endpt) = (None, None) hdrs = content.split('\r\n') for hdr in hdrs: if hdr.lower().startswith('host:'): host = hdr elif hdr.lower().startswith('get') or hdr.lower().startswith('post'): endpt = hdr if host and endpt: hdrs.remove(host) hdrs.remove(endpt) host = host.split(' ')[1] endpt = endpt.split(' ') if self.__proxy[4] != None and self.__proxy[5] != None: hdrs.insert(0, self.__getauthheader()) hdrs.insert(0, 'Host: %s' % host) hdrs.insert(0, '%s http://%s%s %s' % (endpt[0], host, endpt[1], endpt[2])) content = '\r\n'.join(hdrs) return super(socksocket, self).sendall(content, *args)
CalendarHangout
positive
def compress(self, arr, op, compress_ratio, is_biased): if 'top_k' in op: <DeepExtract> x_data = arr.view(-1) x_len = x_data.nelement() top_k = max(1, int(x_len * (1 - compress_ratio))) if top_k == 1: (_, selected_indices) = torch.max(x_data.abs(), dim=0, keepdim=True) else: (_, selected_indices) = torch.topk(x_data.abs(), top_k, largest=True, sorted=False) (values, indices) = (x_data[selected_indices], selected_indices) </DeepExtract> elif 'random_k' in op: <DeepExtract> x_data = arr.view(-1) x_len = x_data.nelement() top_k = max(1, int(x_len * (1 - compress_ratio))) selected_indices = np.random.choice(x_len, top_k, replace=False) selected_indices = torch.LongTensor(selected_indices).to(arr.device) if is_biased: (values, indices) = (x_data[selected_indices], selected_indices) else: (values, indices) = (x_len / top_k * x_data[selected_indices], selected_indices) </DeepExtract> else: raise NotImplementedError return (values, indices)
def compress(self, arr, op, compress_ratio, is_biased): if 'top_k' in op: x_data = arr.view(-1) x_len = x_data.nelement() top_k = max(1, int(x_len * (1 - compress_ratio))) if top_k == 1: (_, selected_indices) = torch.max(x_data.abs(), dim=0, keepdim=True) else: (_, selected_indices) = torch.topk(x_data.abs(), top_k, largest=True, sorted=False) (values, indices) = (x_data[selected_indices], selected_indices) elif 'random_k' in op: x_data = arr.view(-1) x_len = x_data.nelement() top_k = max(1, int(x_len * (1 - compress_ratio))) selected_indices = np.random.choice(x_len, top_k, replace=False) selected_indices = torch.LongTensor(selected_indices).to(arr.device) if is_biased: (values, indices) = (x_data[selected_indices], selected_indices) else: (values, indices) = (x_len / top_k * x_data[selected_indices], selected_indices) else: raise NotImplementedError return (values, indices)
ChocoSGD
positive
def __call__(self, *args, **kwargs): assert len(args) <= len(self.inputs), 'Too many arguments provided' feed_dict = {} for (inpt, value) in zip(self.inputs, args): <DeepExtract> if issubclass(type(inpt), TfInput): feed_dict.update(inpt.make_feed_dict(value)) elif is_placeholder(inpt): feed_dict[inpt] = value </DeepExtract> kwargs_passed_inpt_names = set() for inpt in self.inputs[len(args):]: inpt_name = inpt.name.split(':')[0] inpt_name = inpt_name.split('/')[-1] assert inpt_name not in kwargs_passed_inpt_names, 'this function has two arguments with the same name "{}", so kwargs cannot be used.'.format(inpt_name) if inpt_name in kwargs: kwargs_passed_inpt_names.add(inpt_name) <DeepExtract> if issubclass(type(inpt), TfInput): feed_dict.update(inpt.make_feed_dict(kwargs.pop(inpt_name))) elif is_placeholder(inpt): feed_dict[inpt] = kwargs.pop(inpt_name) </DeepExtract> else: assert inpt in self.givens, 'Missing argument ' + inpt_name assert len(kwargs) == 0, 'Function got extra arguments ' + str(list(kwargs.keys())) for inpt in self.givens: feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt]) results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1] if self.check_nan: if any((np.isnan(r).any() for r in results)): raise RuntimeError('Nan detected') return results
def __call__(self, *args, **kwargs): assert len(args) <= len(self.inputs), 'Too many arguments provided' feed_dict = {} for (inpt, value) in zip(self.inputs, args): if issubclass(type(inpt), TfInput): feed_dict.update(inpt.make_feed_dict(value)) elif is_placeholder(inpt): feed_dict[inpt] = value kwargs_passed_inpt_names = set() for inpt in self.inputs[len(args):]: inpt_name = inpt.name.split(':')[0] inpt_name = inpt_name.split('/')[-1] assert inpt_name not in kwargs_passed_inpt_names, 'this function has two arguments with the same name "{}", so kwargs cannot be used.'.format(inpt_name) if inpt_name in kwargs: kwargs_passed_inpt_names.add(inpt_name) if issubclass(type(inpt), TfInput): feed_dict.update(inpt.make_feed_dict(kwargs.pop(inpt_name))) elif is_placeholder(inpt): feed_dict[inpt] = kwargs.pop(inpt_name) else: assert inpt in self.givens, 'Missing argument ' + inpt_name assert len(kwargs) == 0, 'Function got extra arguments ' + str(list(kwargs.keys())) for inpt in self.givens: feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt]) results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1] if self.check_nan: if any((np.isnan(r).any() for r in results)): raise RuntimeError('Nan detected') return results
2017-learning-to-run
positive
def _describe_schema(name, val, depth, max_depth): val_type = type(val) ret = {} if name is not None: ret['name'] = name ret['class'] = fqname(val_type) <DeepExtract> (islist, isdict) = (issubclass(val_type, list), issubclass(val_type, dict)) </DeepExtract> if not (islist or isdict) or (depth >= max_depth and isdict): return ret if islist: elts = [(None, v) for v in val] schema_key = 'items' elif isdict: elts = val.items() schema_key = 'properties' schema = [_describe_schema(k, v, depth + 1, max_depth) for (k, v) in elts] if any(schema): ret[schema_key] = schema return ret
def _describe_schema(name, val, depth, max_depth): val_type = type(val) ret = {} if name is not None: ret['name'] = name ret['class'] = fqname(val_type) (islist, isdict) = (issubclass(val_type, list), issubclass(val_type, dict)) if not (islist or isdict) or (depth >= max_depth and isdict): return ret if islist: elts = [(None, v) for v in val] schema_key = 'items' elif isdict: elts = val.items() schema_key = 'properties' schema = [_describe_schema(k, v, depth + 1, max_depth) for (k, v) in elts] if any(schema): ret[schema_key] = schema return ret
appmap-python
positive
def cd_score(image_true, image_pred, raster_res, **kwargs): """Computes IoU metric between ground-truth and predicted vectors. See `vectran.metrics.raster_metrics.iou_score` for reference.""" <DeepExtract> if isinstance(image_true, Dict): raster = render(image_true, raster_res, data_representation) elif isinstance(image_true, List): raster = np.array([render(vector, raster_res, data_representation) for vector in image_true]) elif isinstance(image_true, np.ndarray): raster = image_true else: raise TypeError('parameter image of unknown type') raster_true = raster </DeepExtract> <DeepExtract> if isinstance(image_pred, Dict): raster = render(image_pred, raster_res, data_representation) elif isinstance(image_pred, List): raster = np.array([render(vector, raster_res, data_representation) for vector in image_pred]) elif isinstance(image_pred, np.ndarray): raster = image_pred else: raise TypeError('parameter image of unknown type') raster_pred = raster </DeepExtract> return r.cd_score(raster_true, raster_pred, **kwargs)
def cd_score(image_true, image_pred, raster_res, **kwargs): """Computes IoU metric between ground-truth and predicted vectors. See `vectran.metrics.raster_metrics.iou_score` for reference.""" if isinstance(image_true, Dict): raster = render(image_true, raster_res, data_representation) elif isinstance(image_true, List): raster = np.array([render(vector, raster_res, data_representation) for vector in image_true]) elif isinstance(image_true, np.ndarray): raster = image_true else: raise TypeError('parameter image of unknown type') raster_true = raster if isinstance(image_pred, Dict): raster = render(image_pred, raster_res, data_representation) elif isinstance(image_pred, List): raster = np.array([render(vector, raster_res, data_representation) for vector in image_pred]) elif isinstance(image_pred, np.ndarray): raster = image_pred else: raise TypeError('parameter image of unknown type') raster_pred = raster return r.cd_score(raster_true, raster_pred, **kwargs)
Deep-Vectorization-of-Technical-Drawings
positive
def decrypt_file_aes(in_file, out_file, password, key_length=32): bs = AES.block_size salt = in_file.read(bs)[len('Salted__'):] <DeepExtract> d = d_i = '' while len(d) < key_length + bs: d_i = md5(d_i + password + salt).digest() d += d_i (key, iv) = (d[:key_length], d[key_length:key_length + bs]) </DeepExtract> cipher = AES.new(key, AES.MODE_CBC, iv) next_chunk = '' finished = False while not finished: (chunk, next_chunk) = (next_chunk, cipher.decrypt(in_file.read(1024 * bs))) if len(next_chunk) == 0: padding_length = ord(chunk[-1]) if padding_length < 1 or padding_length > bs: raise ValueError('bad decrypt pad (%d)' % padding_length) if chunk[-padding_length:] != padding_length * chr(padding_length): raise ValueError('bad decrypt') chunk = chunk[:-padding_length] finished = True out_file.write(chunk)
def decrypt_file_aes(in_file, out_file, password, key_length=32): bs = AES.block_size salt = in_file.read(bs)[len('Salted__'):] d = d_i = '' while len(d) < key_length + bs: d_i = md5(d_i + password + salt).digest() d += d_i (key, iv) = (d[:key_length], d[key_length:key_length + bs]) cipher = AES.new(key, AES.MODE_CBC, iv) next_chunk = '' finished = False while not finished: (chunk, next_chunk) = (next_chunk, cipher.decrypt(in_file.read(1024 * bs))) if len(next_chunk) == 0: padding_length = ord(chunk[-1]) if padding_length < 1 or padding_length > bs: raise ValueError('bad decrypt pad (%d)' % padding_length) if chunk[-padding_length:] != padding_length * chr(padding_length): raise ValueError('bad decrypt') chunk = chunk[:-padding_length] finished = True out_file.write(chunk)
EasyStorj
positive
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new histogram metric and a reservoir from the given parameters """ <DeepExtract> try: reservoir_cls = RESERVOIR_TYPES[reservoir_type] except KeyError: raise InvalidMetricError('Unknown reservoir type: {}'.format(reservoir_type)) reservoir = reservoir_cls(*reservoir_args, **reservoir_kwargs) </DeepExtract> return new_histogram(name, reservoir)
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new histogram metric and a reservoir from the given parameters """ try: reservoir_cls = RESERVOIR_TYPES[reservoir_type] except KeyError: raise InvalidMetricError('Unknown reservoir type: {}'.format(reservoir_type)) reservoir = reservoir_cls(*reservoir_args, **reservoir_kwargs) return new_histogram(name, reservoir)
appmetrics
positive
def __init__(self, maxval=100, widgets=default_widgets, term_width=None, fd=sys.stderr, force_update=False): assert maxval > 0 self.maxval = maxval self.widgets = widgets self.fd = fd self.signal_set = False if term_width is None: try: <DeepExtract> (h, w) = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\x00' * 8))[:2] self.term_width = w </DeepExtract> signal.signal(signal.SIGWINCH, self.handle_resize) self.signal_set = True except: self.term_width = 79 else: self.term_width = term_width self.currval = 0 self.finished = False self.prev_percentage = -1 self.start_time = None self.seconds_elapsed = 0 self.force_update = force_update
def __init__(self, maxval=100, widgets=default_widgets, term_width=None, fd=sys.stderr, force_update=False): assert maxval > 0 self.maxval = maxval self.widgets = widgets self.fd = fd self.signal_set = False if term_width is None: try: (h, w) = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\x00' * 8))[:2] self.term_width = w signal.signal(signal.SIGWINCH, self.handle_resize) self.signal_set = True except: self.term_width = 79 else: self.term_width = term_width self.currval = 0 self.finished = False self.prev_percentage = -1 self.start_time = None self.seconds_elapsed = 0 self.force_update = force_update
camr
positive
def get_item_keys(self, processor=None): """ Get item attribute names It can be useful to know what attributes an item in the dataset is stored with, e.g. when one wants to produce a new dataset identical to the source_dataset one but with extra attributes. This method provides these, as a list. :param BasicProcessor processor: A reference to the processor asking for the item keys, to pass on to iterate_itesm :return list: List of keys, may be empty if there are no items in the dataset """ <DeepExtract> path = self.get_results_path() item_mapper = None if not bypass_map_item: own_processor = self.get_own_processor() extension_fits = hasattr(own_processor, 'extension') and own_processor.extension == self.get_extension() if hasattr(own_processor, 'map_item') and extension_fits: item_mapper = own_processor.map_item if path.suffix.lower() == '.csv': with path.open('rb') as infile: wrapped_infile = NullAwareTextIOWrapper(infile, encoding='utf-8') reader = csv.DictReader(wrapped_infile) for item in reader: if hasattr(processor, 'interrupted') and processor.interrupted: raise ProcessorInterruptedException('Processor interrupted while iterating through CSV file') if item_mapper: item = item_mapper(item) yield item elif path.suffix.lower() == '.ndjson': with path.open(encoding='utf-8') as infile: for line in infile: if hasattr(processor, 'interrupted') and processor.interrupted: raise ProcessorInterruptedException('Processor interrupted while iterating through NDJSON file') item = json.loads(line) if item_mapper: item = item_mapper(item) yield item else: raise NotImplementedError('Cannot iterate through %s file' % path.suffix) </DeepExtract> try: keys = list(items.__next__().keys()) except StopIteration: return [] finally: del items return keys
def get_item_keys(self, processor=None): """ Get item attribute names It can be useful to know what attributes an item in the dataset is stored with, e.g. when one wants to produce a new dataset identical to the source_dataset one but with extra attributes. This method provides these, as a list. :param BasicProcessor processor: A reference to the processor asking for the item keys, to pass on to iterate_itesm :return list: List of keys, may be empty if there are no items in the dataset """ path = self.get_results_path() item_mapper = None if not bypass_map_item: own_processor = self.get_own_processor() extension_fits = hasattr(own_processor, 'extension') and own_processor.extension == self.get_extension() if hasattr(own_processor, 'map_item') and extension_fits: item_mapper = own_processor.map_item if path.suffix.lower() == '.csv': with path.open('rb') as infile: wrapped_infile = NullAwareTextIOWrapper(infile, encoding='utf-8') reader = csv.DictReader(wrapped_infile) for item in reader: if hasattr(processor, 'interrupted') and processor.interrupted: raise ProcessorInterruptedException('Processor interrupted while iterating through CSV file') if item_mapper: item = item_mapper(item) yield item elif path.suffix.lower() == '.ndjson': with path.open(encoding='utf-8') as infile: for line in infile: if hasattr(processor, 'interrupted') and processor.interrupted: raise ProcessorInterruptedException('Processor interrupted while iterating through NDJSON file') item = json.loads(line) if item_mapper: item = item_mapper(item) yield item else: raise NotImplementedError('Cannot iterate through %s file' % path.suffix) try: keys = list(items.__next__().keys()) except StopIteration: return [] finally: del items return keys
4cat
positive
def feed(self, aBuf, aCharLen): """feed a character with known length""" if aCharLen == 2: <DeepExtract> order = -1 </DeepExtract> else: order = -1 if order >= 0: self._mTotalChars += 1 if order < self._mTableSize: if 512 > self._mCharToFreqOrder[order]: self._mFreqChars += 1
def feed(self, aBuf, aCharLen): """feed a character with known length""" if aCharLen == 2: order = -1 else: order = -1 if order >= 0: self._mTotalChars += 1 if order < self._mTableSize: if 512 > self._mCharToFreqOrder[order]: self._mFreqChars += 1
CMS-Exploit-Framework
positive
def get_global_option_bool(section, key, default_val, env_name=None): <DeepExtract> global global_opts if not env_name is None and env_name in os.environ: val = os.environ[env_name] if global_opts.has_option(section, key): val = global_opts.get(section, key) val = default_val </DeepExtract> if not val: return False val = str(val).lower().strip() return len(val) > 0 and '0' != val and ('false' != val) and ('no' != val) and ('disable' != val)
def get_global_option_bool(section, key, default_val, env_name=None): global global_opts if not env_name is None and env_name in os.environ: val = os.environ[env_name] if global_opts.has_option(section, key): val = global_opts.get(section, key) val = default_val if not val: return False val = str(val).lower().strip() return len(val) > 0 and '0' != val and ('false' != val) and ('no' != val) and ('disable' != val)
atsf4g-co
positive
def optimize_parameters(self): """One forward & backward pass. One update of parameters""" 'forward pass' with torch.no_grad(): self.siam.model.template(self.template_clean255) self.score_maps_clean = self.siam.get_heat_map(self.X_crops, softmax=True) <DeepExtract> template128_clean = torch.zeros(1, 3, 128, 128).cuda() template128_clean[:, :, 1:, 1:] = self.template_clean1 template128_adv = template128_clean + self.netG(template128_clean) self.template_adv1 = template128_adv[:, :, 1:, 1:] self.template_adv255 = self.template_adv1 * 127.5 + 127.5 </DeepExtract> self.siam.model.template(self.template_adv255) (self.score_maps_adv, self.reg_res_adv) = self.siam.get_cls_reg(self.X_crops, softmax=False) 'backward pass' self.optimizer_G.zero_grad() <DeepExtract> self.loss_G_L2 = self.criterionL2(self.template_adv1, self.template_clean1) * self.weight_L2 attention_mask = self.score_maps_clean > cls_thres num_attention = int(torch.sum(attention_mask)) if num_attention > 0: score_map_adv_att = self.score_maps_adv[attention_mask] reg_adv_att = self.reg_res_adv[2:4, attention_mask] 'original loss(2019.9.8)' 'new loss(hinge loss)(2019.9.9)' self.loss_cls = torch.mean(torch.clamp(score_map_adv_att[:, 1] - score_map_adv_att[:, 0], min=self.cls_margin)) * self.weight_cls self.loss_reg = (torch.mean(torch.clamp(reg_adv_att[0, :], min=self.side_margin1)) + torch.mean(torch.clamp(reg_adv_att[1, :], min=self.side_margin2))) * self.weight_reg self.loss_G = self.loss_G_L2 + self.loss_cls + self.loss_reg else: self.loss_G = self.loss_G_L2 self.loss_G.backward() </DeepExtract> self.optimizer_G.step()
def optimize_parameters(self): """One forward & backward pass. One update of parameters""" 'forward pass' with torch.no_grad(): self.siam.model.template(self.template_clean255) self.score_maps_clean = self.siam.get_heat_map(self.X_crops, softmax=True) template128_clean = torch.zeros(1, 3, 128, 128).cuda() template128_clean[:, :, 1:, 1:] = self.template_clean1 template128_adv = template128_clean + self.netG(template128_clean) self.template_adv1 = template128_adv[:, :, 1:, 1:] self.template_adv255 = self.template_adv1 * 127.5 + 127.5 self.siam.model.template(self.template_adv255) (self.score_maps_adv, self.reg_res_adv) = self.siam.get_cls_reg(self.X_crops, softmax=False) 'backward pass' self.optimizer_G.zero_grad() self.loss_G_L2 = self.criterionL2(self.template_adv1, self.template_clean1) * self.weight_L2 attention_mask = self.score_maps_clean > cls_thres num_attention = int(torch.sum(attention_mask)) if num_attention > 0: score_map_adv_att = self.score_maps_adv[attention_mask] reg_adv_att = self.reg_res_adv[2:4, attention_mask] 'original loss(2019.9.8)' 'new loss(hinge loss)(2019.9.9)' self.loss_cls = torch.mean(torch.clamp(score_map_adv_att[:, 1] - score_map_adv_att[:, 0], min=self.cls_margin)) * self.weight_cls self.loss_reg = (torch.mean(torch.clamp(reg_adv_att[0, :], min=self.side_margin1)) + torch.mean(torch.clamp(reg_adv_att[1, :], min=self.side_margin2))) * self.weight_reg self.loss_G = self.loss_G_L2 + self.loss_cls + self.loss_reg else: self.loss_G = self.loss_G_L2 self.loss_G.backward() self.optimizer_G.step()
CSA
positive
def preset_modules(self, *modules): """ [UNIT]... -- set 'enabled' when in *.preset """ if self.user_mode(): logg.warning('preset makes no sense in --user mode') return True found_all = True units = [] for module in modules: <DeepExtract> found = [] for unit in self.match_sysd_units(module, suffix): if unit not in found: found.append(unit) for unit in self.match_sysv_units(module, suffix): if unit not in found: found.append(unit) matched = found </DeepExtract> if not matched: logg.error('Unit %s could not be found.', unit_of(module)) found_all = False continue for unit in matched: if unit not in units: units += [unit] return self.preset_units(units) and found_all
def preset_modules(self, *modules): """ [UNIT]... -- set 'enabled' when in *.preset """ if self.user_mode(): logg.warning('preset makes no sense in --user mode') return True found_all = True units = [] for module in modules: found = [] for unit in self.match_sysd_units(module, suffix): if unit not in found: found.append(unit) for unit in self.match_sysv_units(module, suffix): if unit not in found: found.append(unit) matched = found if not matched: logg.error('Unit %s could not be found.', unit_of(module)) found_all = False continue for unit in matched: if unit not in units: units += [unit] return self.preset_units(units) and found_all
deployment
positive
def XceptionModel(input_image, num_classes, is_training=False, has_bn=False, data_format='channels_last'): feature_dict = {} bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.layers.conv2d(input_image, 32, (3, 3), use_bias=False, name='block1_conv1', strides=(2, 2), padding='valid', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv1_act') inputs = tf.layers.conv2d(inputs, 64, (3, 3), use_bias=False, name='block1_conv2', strides=(1, 1), padding='valid', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv2_act') residual = tf.layers.conv2d(inputs, 128, (1, 1), use_bias=False, name='conv2d_1', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv1', reuse=None) <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block2_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv2', reuse=None) inputs = inputs </DeepExtract> inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block2_pool') feature_dict['C2'] = inputs inputs = tf.add(inputs, residual, name='residual_add_0') residual = tf.layers.conv2d(inputs, 256, (1, 1), use_bias=False, name='conv2d_2', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block3_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 256, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block3_sepconv1', reuse=None) inputs = inputs </DeepExtract> <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block3_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 256, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block3_sepconv2', reuse=None) inputs = inputs </DeepExtract> inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block3_pool') inputs 
= tf.add(inputs, residual, name='residual_add_1') feature_dict['C3'] = inputs residual = tf.layers.conv2d(inputs, 728, (1, 1), use_bias=False, name='conv2d_3', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block4_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block4_sepconv1', reuse=None) inputs = inputs </DeepExtract> <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block4_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block4_sepconv2', reuse=None) inputs = inputs </DeepExtract> inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block4_pool') inputs = tf.add(inputs, residual, name='residual_add_2') feature_dict['C4'] = inputs for index in range(8): residual = inputs prefix = 'block' + str(index + 5) <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv1', reuse=None) inputs = inputs </DeepExtract> <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv2', reuse=None) inputs = inputs </DeepExtract> <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv3' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv3', reuse=None) inputs = inputs </DeepExtract> inputs = tf.add(inputs, residual, name=prefix + '_residual_add') residual = tf.layers.conv2d(inputs, 1024, (1, 1), use_bias=False, name='conv2d_4', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) <DeepExtract> bn_axis = -1 if data_format == 
'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block13_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block13_sepconv1', reuse=None) inputs = inputs </DeepExtract> <DeepExtract> bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block13_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 1024, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block13_sepconv2', reuse=None) inputs = inputs </DeepExtract> inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block13_pool') inputs = tf.add(inputs, residual, name='residual_add_3') feature_dict['C5'] = inputs return feature_dict
def XceptionModel(input_image, num_classes, is_training=False, has_bn=False, data_format='channels_last'): feature_dict = {} bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.layers.conv2d(input_image, 32, (3, 3), use_bias=False, name='block1_conv1', strides=(2, 2), padding='valid', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv1_act') inputs = tf.layers.conv2d(inputs, 64, (3, 3), use_bias=False, name='block1_conv2', strides=(1, 1), padding='valid', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.nn.relu(inputs, name='block1_conv2_act') residual = tf.layers.conv2d(inputs, 128, (1, 1), use_bias=False, name='conv2d_1', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv1', reuse=None) bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block2_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 128, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block2_sepconv2', reuse=None) inputs = inputs inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block2_pool') feature_dict['C2'] = inputs inputs = tf.add(inputs, residual, name='residual_add_0') residual = tf.layers.conv2d(inputs, 256, (1, 1), use_bias=False, name='conv2d_2', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block3_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 256, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block3_sepconv1', reuse=None) inputs = inputs bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block3_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 256, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block3_sepconv2', reuse=None) inputs = inputs inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block3_pool') inputs = tf.add(inputs, residual, name='residual_add_1') feature_dict['C3'] = inputs residual 
= tf.layers.conv2d(inputs, 728, (1, 1), use_bias=False, name='conv2d_3', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block4_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block4_sepconv1', reuse=None) inputs = inputs bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block4_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block4_sepconv2', reuse=None) inputs = inputs inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block4_pool') inputs = tf.add(inputs, residual, name='residual_add_2') feature_dict['C4'] = inputs for index in range(8): residual = inputs prefix = 'block' + str(index + 5) bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv1', reuse=None) inputs = inputs bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv2', reuse=None) inputs = inputs bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name=prefix + '_sepconv3' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name=prefix + '_sepconv3', reuse=None) inputs = inputs inputs = tf.add(inputs, residual, name=prefix + '_residual_add') residual = tf.layers.conv2d(inputs, 1024, (1, 1), use_bias=False, name='conv2d_4', strides=(2, 2), padding='same', data_format=data_format, activation=None, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer()) bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block13_sepconv1' + '_act') inputs = tf.layers.separable_conv2d(inputs, 728, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, 
depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block13_sepconv1', reuse=None) inputs = inputs bn_axis = -1 if data_format == 'channels_last' else 1 inputs = tf.nn.relu(inputs, name='block13_sepconv2' + '_act') inputs = tf.layers.separable_conv2d(inputs, 1024, (3, 3), strides=(1, 1), padding='same', data_format=data_format, activation=None, use_bias=False, depthwise_initializer=tf.contrib.layers.xavier_initializer(), pointwise_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer(), name='block13_sepconv2', reuse=None) inputs = inputs inputs = tf.layers.max_pooling2d(inputs, pool_size=(3, 3), strides=(2, 2), padding='same', data_format=data_format, name='block13_pool') inputs = tf.add(inputs, residual, name='residual_add_3') feature_dict['C5'] = inputs return feature_dict
DCL_RetinaNet_Tensorflow
positive
def train(self): self.scheduler.step() self.loss.step() epoch = self.scheduler.last_epoch + 1 lr = 0.0001 lr_dis = 1e-06 self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))) self.loss.start_log() self.model.train() (timer_data, timer_model) = (utility.timer(), utility.timer()) criterion_ssim = pytorch_ssim.SSIM(window_size=11) criterion_mse = nn.MSELoss() adversarial_criterion = nn.MSELoss() <DeepExtract> vgg_model = torchvision.models.vgg16(pretrained=False).cuda() vgg_model.load_state_dict(torch.load('./vgg16-397923af.pth')) trainable(vgg_model, False) vgg_model = vgg_model </DeepExtract> vgg = vgg_v2(vgg_model) vgg.eval() for (batch, (lr, hr, lrr, hq, _, idx_scale)) in enumerate(self.loader_train): <DeepExtract> device = torch.device('cpu' if self.args.cpu else 'cuda') def _prepare(tensor): if self.args.precision == 'half': tensor = tensor.half() (lr, hr, lrr, hq) = tensor.to(device) (lr, hr, lrr, hq) = [_prepare(a) for a in args] </DeepExtract> timer_data.hold() timer_model.tic() self.optimizer.zero_grad() lr = lr / 255.0 hr = hr / 255.0 hq = hq / 255.0 lrr = lrr / 255.0 [b, c, h, w] = hr.shape (phr1, phr2, phr3) = self.model(lr, 3) Img_up = nn.Upsample(scale_factor=2, mode='bilinear') Img_up_4x = nn.Upsample(scale_factor=4, mode='bilinear') phr1_2 = Img_up_4x(phr3) phr2_2 = Img_up(phr2) phr3_2 = phr1 (phr1_r, phr2_r, phr3_r) = self.model(lrr, 3) phr1_2_r = Img_up_4x(phr3_r) phr2_2_r = Img_up(phr2_r) phr3_2_r = phr1_r input_step2 = [lr, phr1_2, phr2_2, phr3_2] input_step2_r = [lrr, phr1_2_r, phr2_2_r, phr3_2_r] (phr, _, _, _) = self.recompose(input_step2, 3) (phr_r, _, _, _) = self.recompose(input_step2_r, 3) target_real = (torch.rand(self.args.batch_size * 2, 1) * 0.5 + 0.7).cuda() target_fake = (torch.rand(self.args.batch_size, 1) * 0.3).cuda() ones_const = torch.ones(self.args.batch_size, 1).cuda() self.optimizer_dis.zero_grad() hr_all = torch.cat((hr, hq), 0) (hr_all_sorted, _) = hr_all.sort(dim=1) (phr_r_sorted, _) = phr_r.sort(dim=1) hr_all_full = torch.cat((hr_all, hr_all_sorted), 1) phr_r_full = torch.cat((phr_r, phr_r_sorted), 1) discriminator_loss = adversarial_criterion(self.dis(hr_all_full, 3), target_real) + adversarial_criterion(self.dis(phr_r_full, 3), target_fake) discriminator_loss.backward(retain_graph=True) self.optimizer_dis.step() self.optimizer.zero_grad() rect_loss = vgg_loss(vgg, phr, hr) + criterion_ssim(phr, hr) generator_adversarial_loss = adversarial_criterion(self.dis(phr_r_full, 3), ones_const) full_loss = rect_loss + 1 * generator_adversarial_loss full_loss.backward() self.optimizer.step() if (batch + 1) % self.args.print_every == 0: self.ckp.write_log('[{}/{}]\t{}\t{}\t{}\t{:.1f}+{:.1f}s'.format((batch + 1) * self.args.batch_size, len(self.loader_train.dataset), discriminator_loss.item(), rect_loss.item(), generator_adversarial_loss.item(), timer_model.release(), timer_data.release())) self.loss.end_log(len(self.loader_train)) self.error_last = self.loss.log[-1, -1]
def train(self): self.scheduler.step() self.loss.step() epoch = self.scheduler.last_epoch + 1 lr = 0.0001 lr_dis = 1e-06 self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))) self.loss.start_log() self.model.train() (timer_data, timer_model) = (utility.timer(), utility.timer()) criterion_ssim = pytorch_ssim.SSIM(window_size=11) criterion_mse = nn.MSELoss() adversarial_criterion = nn.MSELoss() vgg_model = torchvision.models.vgg16(pretrained=False).cuda() vgg_model.load_state_dict(torch.load('./vgg16-397923af.pth')) trainable(vgg_model, False) vgg_model = vgg_model vgg = vgg_v2(vgg_model) vgg.eval() for (batch, (lr, hr, lrr, hq, _, idx_scale)) in enumerate(self.loader_train): device = torch.device('cpu' if self.args.cpu else 'cuda') def _prepare(tensor): if self.args.precision == 'half': tensor = tensor.half() (lr, hr, lrr, hq) = tensor.to(device) (lr, hr, lrr, hq) = [_prepare(a) for a in args] timer_data.hold() timer_model.tic() self.optimizer.zero_grad() lr = lr / 255.0 hr = hr / 255.0 hq = hq / 255.0 lrr = lrr / 255.0 [b, c, h, w] = hr.shape (phr1, phr2, phr3) = self.model(lr, 3) Img_up = nn.Upsample(scale_factor=2, mode='bilinear') Img_up_4x = nn.Upsample(scale_factor=4, mode='bilinear') phr1_2 = Img_up_4x(phr3) phr2_2 = Img_up(phr2) phr3_2 = phr1 (phr1_r, phr2_r, phr3_r) = self.model(lrr, 3) phr1_2_r = Img_up_4x(phr3_r) phr2_2_r = Img_up(phr2_r) phr3_2_r = phr1_r input_step2 = [lr, phr1_2, phr2_2, phr3_2] input_step2_r = [lrr, phr1_2_r, phr2_2_r, phr3_2_r] (phr, _, _, _) = self.recompose(input_step2, 3) (phr_r, _, _, _) = self.recompose(input_step2_r, 3) target_real = (torch.rand(self.args.batch_size * 2, 1) * 0.5 + 0.7).cuda() target_fake = (torch.rand(self.args.batch_size, 1) * 0.3).cuda() ones_const = torch.ones(self.args.batch_size, 1).cuda() self.optimizer_dis.zero_grad() hr_all = torch.cat((hr, hq), 0) (hr_all_sorted, _) = hr_all.sort(dim=1) (phr_r_sorted, _) = phr_r.sort(dim=1) hr_all_full = torch.cat((hr_all, hr_all_sorted), 1) phr_r_full = torch.cat((phr_r, phr_r_sorted), 1) discriminator_loss = adversarial_criterion(self.dis(hr_all_full, 3), target_real) + adversarial_criterion(self.dis(phr_r_full, 3), target_fake) discriminator_loss.backward(retain_graph=True) self.optimizer_dis.step() self.optimizer.zero_grad() rect_loss = vgg_loss(vgg, phr, hr) + criterion_ssim(phr, hr) generator_adversarial_loss = adversarial_criterion(self.dis(phr_r_full, 3), ones_const) full_loss = rect_loss + 1 * generator_adversarial_loss full_loss.backward() self.optimizer.step() if (batch + 1) % self.args.print_every == 0: self.ckp.write_log('[{}/{}]\t{}\t{}\t{}\t{:.1f}+{:.1f}s'.format((batch + 1) * self.args.batch_size, len(self.loader_train.dataset), discriminator_loss.item(), rect_loss.item(), generator_adversarial_loss.item(), timer_model.release(), timer_data.release())) self.loss.end_log(len(self.loader_train)) self.error_last = self.loss.log[-1, -1]
CVPR-2020-Semi-Low-Light
positive
def _reset(self, car_pose): """Reset camera position based on the track size""" if self.is_reset_called: return <DeepExtract> (x_min, y_min, x_max, y_max) = self.track_data.outer_border.bounds model_pose = Pose() model_pose.position.x = (x_min + x_max) / 2.0 model_pose.position.y = (y_min + y_max) / 2.0 model_pose.position.z = CAMERA_HEIGHT (x, y, z, w) = euler_to_quaternion(roll=1.57079, pitch=1.57079, yaw=3.14159) model_pose.orientation.x = x model_pose.orientation.y = y model_pose.orientation.z = z model_pose.orientation.w = w model_pose = model_pose </DeepExtract> camera_model_state = ModelState() camera_model_state.model_name = self.model_name camera_model_state.pose = model_pose SetModelStateTracker.get_instance().set_model_state(camera_model_state)
def _reset(self, car_pose): """Reset camera position based on the track size""" if self.is_reset_called: return (x_min, y_min, x_max, y_max) = self.track_data.outer_border.bounds model_pose = Pose() model_pose.position.x = (x_min + x_max) / 2.0 model_pose.position.y = (y_min + y_max) / 2.0 model_pose.position.z = CAMERA_HEIGHT (x, y, z, w) = euler_to_quaternion(roll=1.57079, pitch=1.57079, yaw=3.14159) model_pose.orientation.x = x model_pose.orientation.y = y model_pose.orientation.z = z model_pose.orientation.w = w model_pose = model_pose camera_model_state = ModelState() camera_model_state.model_name = self.model_name camera_model_state.pose = model_pose SetModelStateTracker.get_instance().set_model_state(camera_model_state)
deepracer-local
positive
def get_unet_small2(nClasses, input_height=128, input_width=128, n_filters=16, dropout=0.1, batchnorm=True, n_channels=3): input_img = Input(shape=(input_height, input_width, n_channels)) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x </DeepExtract> p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) <DeepExtract> x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x </DeepExtract> u3 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c2) u3 = concatenate([u3, c1]) u3 = Dropout(dropout)(u3) <DeepExtract> x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x </DeepExtract> outputs = Conv2D(1, (1, 1), activation='relu')(c3) model = Model(inputs=[input_img], outputs=[outputs]) return model
def get_unet_small2(nClasses, input_height=128, input_width=128, n_filters=16, dropout=0.1, batchnorm=True, n_channels=3): input_img = Input(shape=(input_height, input_width, n_channels)) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(input_img) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c1 = x p1 = MaxPooling2D((2, 2))(c1) p1 = Dropout(dropout)(p1) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(p1) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 4, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c2 = x u3 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c2) u3 = concatenate([u3, c1]) u3 = Dropout(dropout)(u3) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(u3) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(filters=n_filters * 1, kernel_size=(3, 3), kernel_initializer='he_normal', padding='same')(x) if batchnorm: x = BatchNormalization()(x) x = Activation('relu')(x) c3 = x outputs = Conv2D(1, (1, 1), activation='relu')(c3) model = Model(inputs=[input_img], outputs=[outputs]) return model
activefire
positive
def build_concept_instance_table(aser_conceptualizer, erows): cid2concept = dict() concept_instance_pairs = [] cid_to_filter_score = dict() for erow in tqdm(erows): <DeepExtract> eventuality = Eventuality().decode(erow['info']) eventuality.eid = erow['_id'] eventuality.frequency = erow['frequency'] eventuality.pattern = erow['pattern'] event = eventuality </DeepExtract> results = aser_conceptualizer.conceptualize(event) for (concept, score) in results: if concept.cid not in cid2concept: cid2concept[concept.cid] = copy.copy(concept) concept = cid2concept[concept.cid] if (event.eid, event.pattern, score) not in concept.instances: concept.instances.append((event.eid, event.pattern, score)) if concept.cid not in cid_to_filter_score: cid_to_filter_score[concept.cid] = 0.0 cid_to_filter_score[concept.cid] += score * event.frequency concept_instance_pairs.append((concept, event, score)) return (cid2concept, concept_instance_pairs, cid_to_filter_score)
def build_concept_instance_table(aser_conceptualizer, erows): cid2concept = dict() concept_instance_pairs = [] cid_to_filter_score = dict() for erow in tqdm(erows): eventuality = Eventuality().decode(erow['info']) eventuality.eid = erow['_id'] eventuality.frequency = erow['frequency'] eventuality.pattern = erow['pattern'] event = eventuality results = aser_conceptualizer.conceptualize(event) for (concept, score) in results: if concept.cid not in cid2concept: cid2concept[concept.cid] = copy.copy(concept) concept = cid2concept[concept.cid] if (event.eid, event.pattern, score) not in concept.instances: concept.instances.append((event.eid, event.pattern, score)) if concept.cid not in cid_to_filter_score: cid_to_filter_score[concept.cid] = 0.0 cid_to_filter_score[concept.cid] += score * event.frequency concept_instance_pairs.append((concept, event, score)) return (cid2concept, concept_instance_pairs, cid_to_filter_score)
ASER
positive
def test_chaining(self): self.collection_def = {'request': {'operation': 'GetFrobs'}, 'resource': {'type': 'Frob', 'identifiers': [{'target': 'Id', 'source': 'response', 'path': 'Frobs[].Id'}]}} self.client.get_frobs.return_value = {'Frobs': [{'Id': 'one'}, {'Id': 'two'}, {'Id': 'three'}, {'Id': 'four'}]} <DeepExtract> resource_defs = {'Frob': {'identifiers': []}} resource_def = self.collection_def.get('resource', {}) for identifier in resource_def.get('identifiers', []): resource_defs['Frob']['identifiers'].append({'name': identifier['target']}) collection_model = Collection('test', self.collection_def, resource_defs) collection = CollectionManager(collection_model=collection_model, parent=self.parent, factory=self.factory, service_context=ServiceContext(service_name='test', service_model=self.service_model, resource_json_definitions=resource_defs, service_waiter_model=None)) collection = collection </DeepExtract> items = list(collection.filter().all().all()) assert len(items) == 4 assert items[0].id == 'one' assert items[1].id == 'two' assert items[2].id == 'three' assert items[3].id == 'four'
def test_chaining(self): self.collection_def = {'request': {'operation': 'GetFrobs'}, 'resource': {'type': 'Frob', 'identifiers': [{'target': 'Id', 'source': 'response', 'path': 'Frobs[].Id'}]}} self.client.get_frobs.return_value = {'Frobs': [{'Id': 'one'}, {'Id': 'two'}, {'Id': 'three'}, {'Id': 'four'}]} resource_defs = {'Frob': {'identifiers': []}} resource_def = self.collection_def.get('resource', {}) for identifier in resource_def.get('identifiers', []): resource_defs['Frob']['identifiers'].append({'name': identifier['target']}) collection_model = Collection('test', self.collection_def, resource_defs) collection = CollectionManager(collection_model=collection_model, parent=self.parent, factory=self.factory, service_context=ServiceContext(service_name='test', service_model=self.service_model, resource_json_definitions=resource_defs, service_waiter_model=None)) collection = collection items = list(collection.filter().all().all()) assert len(items) == 4 assert items[0].id == 'one' assert items[1].id == 'two' assert items[2].id == 'three' assert items[3].id == 'four'
boto3
positive
@contextlib.contextmanager def task(ctx, config): """ Execute a radosbench parameter sweep Puts radosbench in a loop, taking values from the given config at each iteration. If given, the min and max values below create a range, e.g. min_replicas=1 and max_replicas=3 implies executing with 1-3 replicas. Parameters: clients: [client list] time: seconds to run (default=120) sizes: [list of object sizes] (default=[4M]) mode: <write|read|seq> (default=write) repetitions: execute the same configuration multiple times (default=1) min_num_replicas: minimum number of replicas to use (default = 3) max_num_replicas: maximum number of replicas to use (default = 3) min_num_osds: the minimum number of OSDs in a pool (default=all) max_num_osds: the maximum number of OSDs in a pool (default=all) file: name of CSV-formatted output file (default='radosbench.csv') columns: columns to include (default=all) - rep: execution number (takes values from 'repetitions') - num_osd: number of osds for pool - num_replica: number of replicas - avg_throughput: throughput - avg_latency: latency - stdev_throughput: - stdev_latency: Example: - radsobenchsweep: columns: [rep, num_osd, num_replica, avg_throughput, stdev_throughput] """ log.info('Beginning radosbenchsweep...') assert isinstance(config, dict), 'expecting dictionary for configuration' if len(config.get('clients', [])) != 1: raise Exception('Only one client can be specified') if config.get('mode', 'write') != 'write': raise Exception("Only 'write' mode supported for now.") total_osds_in_cluster = teuthology.num_instances_of_type(ctx.cluster, 'osd') min_num_osds = config.get('min_num_osds', total_osds_in_cluster) max_num_osds = config.get('max_num_osds', total_osds_in_cluster) if max_num_osds > total_osds_in_cluster: raise Exception('max_num_osds cannot be greater than total in cluster') if min_num_osds < 1: raise Exception('min_num_osds cannot be less than 1') if min_num_osds > max_num_osds: raise Exception('min_num_osds cannot be greater than max_num_osd') osds = range(0, total_osds_in_cluster + 1) min_num_replicas = config.get('min_num_replicas', 3) max_num_replicas = config.get('max_num_replicas', 3) if min_num_replicas < 1: raise Exception('min_num_replicas cannot be less than 1') if min_num_replicas > max_num_replicas: raise Exception('min_num_replicas cannot be greater than max_replicas') if max_num_replicas > max_num_osds: raise Exception('max_num_replicas cannot be greater than max_num_osds') replicas = range(min_num_replicas, max_num_replicas + 1) sizes = config.get('size', [4 << 20]) reps = range(config.get('repetitions', 1)) fname = config.get('file', 'radosbench.csv') f = open('{}/{}'.format(ctx.archive, fname), 'w') f.write(get_csv_header(config) + '\n') ctx.manager.set_pool_property('data', 'size', 1) ctx.manager.set_pool_property('metadata', 'size', 1) ctx.manager.set_pool_property('rbd', 'size', 1) current_osds_out = 0 for (osds_out, size, replica, rep) in product(osds, sizes, replicas, reps): osds_in = total_osds_in_cluster - osds_out if osds_in == 0: break if current_osds_out != osds_out: ctx.manager.raw_cluster_cmd('osd', 'reweight', str(osds_out - 1), '0.0') <DeepExtract> first_mon = teuthology.get_first_mon(ctx, config) (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() teuthology.wait_until_healthy(ctx, mon_remote) </DeepExtract> current_osds_out = osds_out if osds_in not in range(min_num_osds, max_num_osds + 1): continue if osds_in < replica: continue <DeepExtract> pool = ctx.manager.create_pool_with_unique_name() 
ctx.manager.set_pool_property(pool, 'size', replica) wait_until_healthy(ctx, config) log.info('Executing with parameters: ') log.info(' num_osd =' + str(osds_in)) log.info(' size =' + str(size)) log.info(' num_replicas =' + str(replica)) log.info(' repetition =' + str(rep)) for role in config.get('clients', ['client.0']): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] (remote,) = ctx.cluster.only(role).remotes.iterkeys() proc = remote.run(args=['adjust-ulimits', 'ceph-coverage', '{}/archive/coverage'.format(teuthology.get_testdir(ctx)), 'rados', '--no-log-to-stderr', '--name', role, '-b', str(size), '-p', pool, 'bench', str(config.get('time', 120)), 'write'], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, stdout=StringIO(), wait=False) proc.wait() out = proc.stdout.getvalue() all_values = {'stdev_throughput': re.sub('Stddev Bandwidth: ', '', re.search('Stddev Bandwidth:.*', out).group(0)), 'stdev_latency': re.sub('Stddev Latency: ', '', re.search('Stddev Latency:.*', out).group(0)), 'avg_throughput': re.sub('Bandwidth \\(MB/sec\\): ', '', re.search('Bandwidth \\(MB/sec\\):.*', out).group(0)), 'avg_latency': re.sub('Average Latency: ', '', re.search('Average Latency:.*', out).group(0)), 'rep': str(rep), 'num_osd': str(osds_in), 'num_replica': str(replica)} values_to_write = [] for column in config['columns']: values_to_write.extend([all_values[column]]) f.write(','.join(values_to_write) + '\n') ctx.manager.remove_pool(pool) </DeepExtract> f.close() yield
@contextlib.contextmanager def task(ctx, config): """ Execute a radosbench parameter sweep Puts radosbench in a loop, taking values from the given config at each iteration. If given, the min and max values below create a range, e.g. min_replicas=1 and max_replicas=3 implies executing with 1-3 replicas. Parameters: clients: [client list] time: seconds to run (default=120) sizes: [list of object sizes] (default=[4M]) mode: <write|read|seq> (default=write) repetitions: execute the same configuration multiple times (default=1) min_num_replicas: minimum number of replicas to use (default = 3) max_num_replicas: maximum number of replicas to use (default = 3) min_num_osds: the minimum number of OSDs in a pool (default=all) max_num_osds: the maximum number of OSDs in a pool (default=all) file: name of CSV-formatted output file (default='radosbench.csv') columns: columns to include (default=all) - rep: execution number (takes values from 'repetitions') - num_osd: number of osds for pool - num_replica: number of replicas - avg_throughput: throughput - avg_latency: latency - stdev_throughput: - stdev_latency: Example: - radsobenchsweep: columns: [rep, num_osd, num_replica, avg_throughput, stdev_throughput] """ log.info('Beginning radosbenchsweep...') assert isinstance(config, dict), 'expecting dictionary for configuration' if len(config.get('clients', [])) != 1: raise Exception('Only one client can be specified') if config.get('mode', 'write') != 'write': raise Exception("Only 'write' mode supported for now.") total_osds_in_cluster = teuthology.num_instances_of_type(ctx.cluster, 'osd') min_num_osds = config.get('min_num_osds', total_osds_in_cluster) max_num_osds = config.get('max_num_osds', total_osds_in_cluster) if max_num_osds > total_osds_in_cluster: raise Exception('max_num_osds cannot be greater than total in cluster') if min_num_osds < 1: raise Exception('min_num_osds cannot be less than 1') if min_num_osds > max_num_osds: raise Exception('min_num_osds cannot be greater than max_num_osd') osds = range(0, total_osds_in_cluster + 1) min_num_replicas = config.get('min_num_replicas', 3) max_num_replicas = config.get('max_num_replicas', 3) if min_num_replicas < 1: raise Exception('min_num_replicas cannot be less than 1') if min_num_replicas > max_num_replicas: raise Exception('min_num_replicas cannot be greater than max_replicas') if max_num_replicas > max_num_osds: raise Exception('max_num_replicas cannot be greater than max_num_osds') replicas = range(min_num_replicas, max_num_replicas + 1) sizes = config.get('size', [4 << 20]) reps = range(config.get('repetitions', 1)) fname = config.get('file', 'radosbench.csv') f = open('{}/{}'.format(ctx.archive, fname), 'w') f.write(get_csv_header(config) + '\n') ctx.manager.set_pool_property('data', 'size', 1) ctx.manager.set_pool_property('metadata', 'size', 1) ctx.manager.set_pool_property('rbd', 'size', 1) current_osds_out = 0 for (osds_out, size, replica, rep) in product(osds, sizes, replicas, reps): osds_in = total_osds_in_cluster - osds_out if osds_in == 0: break if current_osds_out != osds_out: ctx.manager.raw_cluster_cmd('osd', 'reweight', str(osds_out - 1), '0.0') first_mon = teuthology.get_first_mon(ctx, config) (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() teuthology.wait_until_healthy(ctx, mon_remote) current_osds_out = osds_out if osds_in not in range(min_num_osds, max_num_osds + 1): continue if osds_in < replica: continue pool = ctx.manager.create_pool_with_unique_name() ctx.manager.set_pool_property(pool, 'size', replica) 
wait_until_healthy(ctx, config) log.info('Executing with parameters: ') log.info(' num_osd =' + str(osds_in)) log.info(' size =' + str(size)) log.info(' num_replicas =' + str(replica)) log.info(' repetition =' + str(rep)) for role in config.get('clients', ['client.0']): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] (remote,) = ctx.cluster.only(role).remotes.iterkeys() proc = remote.run(args=['adjust-ulimits', 'ceph-coverage', '{}/archive/coverage'.format(teuthology.get_testdir(ctx)), 'rados', '--no-log-to-stderr', '--name', role, '-b', str(size), '-p', pool, 'bench', str(config.get('time', 120)), 'write'], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, stdout=StringIO(), wait=False) proc.wait() out = proc.stdout.getvalue() all_values = {'stdev_throughput': re.sub('Stddev Bandwidth: ', '', re.search('Stddev Bandwidth:.*', out).group(0)), 'stdev_latency': re.sub('Stddev Latency: ', '', re.search('Stddev Latency:.*', out).group(0)), 'avg_throughput': re.sub('Bandwidth \\(MB/sec\\): ', '', re.search('Bandwidth \\(MB/sec\\):.*', out).group(0)), 'avg_latency': re.sub('Average Latency: ', '', re.search('Average Latency:.*', out).group(0)), 'rep': str(rep), 'num_osd': str(osds_in), 'num_replica': str(replica)} values_to_write = [] for column in config['columns']: values_to_write.extend([all_values[column]]) f.write(','.join(values_to_write) + '\n') ctx.manager.remove_pool(pool) f.close() yield
ceph-qa-suite
positive
def test_predicate2_argument1_and_2(self): exprs = [lexpr('AND(language(Python, Scala), nice(Python))')] (dynamic_library, _) = combine_signatures_or_rename_preds(exprs) <DeepExtract> coq_lib = [] for (predicate, pred_type) in dynamic_library.items(): library_entry = build_library_entry(predicate, pred_type) coq_lib.append(library_entry) dynamic_library = sorted(set(coq_lib)) </DeepExtract> expected_dynamic_library = ['Parameter nice : Entity -> Prop.', 'Parameter Python : Entity.', 'Parameter Scala : Entity.', 'Parameter language : Entity -> (Entity -> Prop).', 'Parameter AND : Prop -> (Prop -> Prop).'] for item in dynamic_library: self.assertIn(item, expected_dynamic_library) self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_predicate2_argument1_and_2(self): exprs = [lexpr('AND(language(Python, Scala), nice(Python))')] (dynamic_library, _) = combine_signatures_or_rename_preds(exprs) coq_lib = [] for (predicate, pred_type) in dynamic_library.items(): library_entry = build_library_entry(predicate, pred_type) coq_lib.append(library_entry) dynamic_library = sorted(set(coq_lib)) expected_dynamic_library = ['Parameter nice : Entity -> Prop.', 'Parameter Python : Entity.', 'Parameter Scala : Entity.', 'Parameter language : Entity -> (Entity -> Prop).', 'Parameter AND : Prop -> (Prop -> Prop).'] for item in dynamic_library: self.assertIn(item, expected_dynamic_library) self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
ccg2lambda
positive
def next_task(self, force_advance_scene: bool=False) -> Optional[ObjectNavTask]: if self.max_tasks is not None and self.max_tasks <= 0: return None if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]): self.scene_index = (self.scene_index + 1) % len(self.scenes) random.shuffle(self.episodes[self.scenes[self.scene_index]]) self.episode_index = 0 scene = self.scenes[self.scene_index] episode = self.episodes[scene][self.episode_index] if self.env is None: <DeepExtract> env = RoboThorEnvironment(**self.env_args) self.env = env </DeepExtract> if scene.replace('_physics', '') != self.env.scene_name.replace('_physics', ''): self.env.reset(scene_name=scene) else: self.env.reset_object_filter() self.env.set_object_filter(object_ids=[o['objectId'] for o in self.env.last_event.metadata['objects'] if o['objectType'] == episode['object_type']]) were_materials_randomized = False if self.randomize_materials_in_training: if 'Train' in scene or int(scene.replace('FloorPlan', '').replace('_physics', '')) % 100 < 21: were_materials_randomized = True self.env.controller.step(action='RandomizeMaterials') task_info = {'scene': scene, 'object_type': episode['object_type'], 'materials_randomized': were_materials_randomized} if len(task_info) == 0: get_logger().warning('Scene {} does not contain any objects of any of the types {}.'.format(scene, self.object_types)) task_info['initial_position'] = episode['initial_position'] task_info['initial_orientation'] = episode['initial_orientation'] task_info['initial_horizon'] = episode.get('initial_horizon', 0) task_info['distance_to_target'] = episode.get('shortest_path_length') task_info['path_to_target'] = episode.get('shortest_path') task_info['object_type'] = episode['object_type'] task_info['id'] = episode['id'] if self.allow_flipping and random.random() > 0.5: task_info['mirrored'] = True else: task_info['mirrored'] = False self.episode_index += 1 if self.max_tasks is not None: self.max_tasks -= 1 if not self.env.teleport(pose=episode['initial_position'], rotation=episode['initial_orientation'], horizon=episode.get('initial_horizon', 0)): return self.next_task() self._last_sampled_task = ObjectNavTask(env=self.env, sensors=self.sensors, task_info=task_info, max_steps=self.max_steps, action_space=self._action_space, reward_configs=self.rewards_config) return self._last_sampled_task
def next_task(self, force_advance_scene: bool=False) -> Optional[ObjectNavTask]: if self.max_tasks is not None and self.max_tasks <= 0: return None if self.episode_index >= len(self.episodes[self.scenes[self.scene_index]]): self.scene_index = (self.scene_index + 1) % len(self.scenes) random.shuffle(self.episodes[self.scenes[self.scene_index]]) self.episode_index = 0 scene = self.scenes[self.scene_index] episode = self.episodes[scene][self.episode_index] if self.env is None: env = RoboThorEnvironment(**self.env_args) self.env = env if scene.replace('_physics', '') != self.env.scene_name.replace('_physics', ''): self.env.reset(scene_name=scene) else: self.env.reset_object_filter() self.env.set_object_filter(object_ids=[o['objectId'] for o in self.env.last_event.metadata['objects'] if o['objectType'] == episode['object_type']]) were_materials_randomized = False if self.randomize_materials_in_training: if 'Train' in scene or int(scene.replace('FloorPlan', '').replace('_physics', '')) % 100 < 21: were_materials_randomized = True self.env.controller.step(action='RandomizeMaterials') task_info = {'scene': scene, 'object_type': episode['object_type'], 'materials_randomized': were_materials_randomized} if len(task_info) == 0: get_logger().warning('Scene {} does not contain any objects of any of the types {}.'.format(scene, self.object_types)) task_info['initial_position'] = episode['initial_position'] task_info['initial_orientation'] = episode['initial_orientation'] task_info['initial_horizon'] = episode.get('initial_horizon', 0) task_info['distance_to_target'] = episode.get('shortest_path_length') task_info['path_to_target'] = episode.get('shortest_path') task_info['object_type'] = episode['object_type'] task_info['id'] = episode['id'] if self.allow_flipping and random.random() > 0.5: task_info['mirrored'] = True else: task_info['mirrored'] = False self.episode_index += 1 if self.max_tasks is not None: self.max_tasks -= 1 if not self.env.teleport(pose=episode['initial_position'], rotation=episode['initial_orientation'], horizon=episode.get('initial_horizon', 0)): return self.next_task() self._last_sampled_task = ObjectNavTask(env=self.env, sensors=self.sensors, task_info=task_info, max_steps=self.max_steps, action_space=self._action_space, reward_configs=self.rewards_config) return self._last_sampled_task
allenact
positive
def get_latest_failed_build(self): log.info('getting latest failed build') url = 'https://api.travis-ci.org/repo/{repo}/builds'.format(repo=self.repo.replace('/', '%2F')) req = self.request_handler.get(url, headers=self.headers) if log.isEnabledFor(logging.DEBUG): log.debug('\n%s', jsonpp(req.content)) try: <DeepExtract> log.debug('parsing latest failed build info') build = None json_data = json.loads(req.content) if not json_data or 'builds' not in json_data or (not json_data['builds']): qquit('UNKNOWN', 'no Travis CI builds returned by the Travis API.' + " Either the specified repo '{0}' doesn't exist".format(self.repo) + ' or no builds have happened yet?' + " Also remember the repo is case sensitive, for example 'harisekhon/nagios-plugins' returns this" + " blank build set whereas 'HariSekhon/Nagios-Plugins' succeeds" + ' in returning latest builds information') builds = json_data['builds'] last_build_number = None found_newer_passing_build = False for _ in builds: build_number = _['number'] if not isInt(build_number): raise UnknownError('build number returned is not an integer!') build_number = int(build_number) if last_build_number is None: last_build_number = int(build_number) + 1 if build_number >= last_build_number: raise UnknownError('build number returned is out of sequence, cannot be >= last build returned' + '{0}'.format(support_msg_api())) last_build_number = build_number if _['state'] == 'passed': if build is None and (not found_newer_passing_build): log.warning("found more recent successful build #%s with state = '%s'" + ', you may not need to debug this build any more', _['number'], _['state']) found_newer_passing_build = True elif _['state'] in ('failed', 'errored'): if build is None: build = _ if build is None: qquit('UNKNOWN', 'no recent failed builds found' + ', you may need to specify the --job-id explicitly as shown in the Travis CI UI') if log.isEnabledFor(logging.DEBUG): log.debug('latest failed build:\n%s', jsonpp(build)) latest_build = build </DeepExtract> except (KeyError, ValueError): exception = traceback.format_exc().split('\n')[-2] qquit('UNKNOWN', 'failed to parse expected json response from Travis CI API: {0}. {1}'.format(exception, support_msg_api())) return latest_build
def get_latest_failed_build(self): log.info('getting latest failed build') url = 'https://api.travis-ci.org/repo/{repo}/builds'.format(repo=self.repo.replace('/', '%2F')) req = self.request_handler.get(url, headers=self.headers) if log.isEnabledFor(logging.DEBUG): log.debug('\n%s', jsonpp(req.content)) try: log.debug('parsing latest failed build info') build = None json_data = json.loads(req.content) if not json_data or 'builds' not in json_data or (not json_data['builds']): qquit('UNKNOWN', 'no Travis CI builds returned by the Travis API.' + " Either the specified repo '{0}' doesn't exist".format(self.repo) + ' or no builds have happened yet?' + " Also remember the repo is case sensitive, for example 'harisekhon/nagios-plugins' returns this" + " blank build set whereas 'HariSekhon/Nagios-Plugins' succeeds" + ' in returning latest builds information') builds = json_data['builds'] last_build_number = None found_newer_passing_build = False for _ in builds: build_number = _['number'] if not isInt(build_number): raise UnknownError('build number returned is not an integer!') build_number = int(build_number) if last_build_number is None: last_build_number = int(build_number) + 1 if build_number >= last_build_number: raise UnknownError('build number returned is out of sequence, cannot be >= last build returned' + '{0}'.format(support_msg_api())) last_build_number = build_number if _['state'] == 'passed': if build is None and (not found_newer_passing_build): log.warning("found more recent successful build #%s with state = '%s'" + ', you may not need to debug this build any more', _['number'], _['state']) found_newer_passing_build = True elif _['state'] in ('failed', 'errored'): if build is None: build = _ if build is None: qquit('UNKNOWN', 'no recent failed builds found' + ', you may need to specify the --job-id explicitly as shown in the Travis CI UI') if log.isEnabledFor(logging.DEBUG): log.debug('latest failed build:\n%s', jsonpp(build)) latest_build = build except (KeyError, ValueError): exception = traceback.format_exc().split('\n')[-2] qquit('UNKNOWN', 'failed to parse expected json response from Travis CI API: {0}. {1}'.format(exception, support_msg_api())) return latest_build
DevOps-Python-tools
positive
def load_widgets(self, instance, value): for id in self.plugins: if self.plugins[id]['disabled'] == False and id not in self.widget_list: <DeepExtract> if id == 'resource_tree': self.ids.resource_tree.add_widget(self.plugins[id]['instance']) elif self.plugins[id]['type'] == 'processing': screen = Screen(name=id) screen.add_widget(self.plugins[id]['instance']) self.ids.processing_screens.add_widget(screen) self.add_munu_button(id) elif self.plugins[id]['type'] == 'display': screen = Screen(name=id) screen.add_widget(self.plugins[id]['instance']) self.ids.display_screens.add_widget(screen) </DeepExtract> self.widget_list.append(id) for id in self.widget_list: if self.plugins[id]['disabled'] == True: <DeepExtract> if id == 'resource_tree': self.ids.resource_tree.remove_widget(self.plugins[id]['instance']) elif self.plugins[id]['type'] == 'processing': screen = self.ids.processing_screens.get_screen(id) self.ids.processing_screens.remove_widget(screen) self.remove_menu_button(id) elif self.plugins[id]['type'] == 'display': screen = self.ids.display_screens.get_screen(id) self.ids.display_screens.remove_widget(screen) </DeepExtract> self.widget_list.remove(id)
def load_widgets(self, instance, value): for id in self.plugins: if self.plugins[id]['disabled'] == False and id not in self.widget_list: if id == 'resource_tree': self.ids.resource_tree.add_widget(self.plugins[id]['instance']) elif self.plugins[id]['type'] == 'processing': screen = Screen(name=id) screen.add_widget(self.plugins[id]['instance']) self.ids.processing_screens.add_widget(screen) self.add_munu_button(id) elif self.plugins[id]['type'] == 'display': screen = Screen(name=id) screen.add_widget(self.plugins[id]['instance']) self.ids.display_screens.add_widget(screen) self.widget_list.append(id) for id in self.widget_list: if self.plugins[id]['disabled'] == True: if id == 'resource_tree': self.ids.resource_tree.remove_widget(self.plugins[id]['instance']) elif self.plugins[id]['type'] == 'processing': screen = self.ids.processing_screens.get_screen(id) self.ids.processing_screens.remove_widget(screen) self.remove_menu_button(id) elif self.plugins[id]['type'] == 'display': screen = self.ids.display_screens.get_screen(id) self.ids.display_screens.remove_widget(screen) self.widget_list.remove(id)
deepdiy
positive
def _add_multilevel_rois(blobs): """By default training RoIs are added for a single feature map level only. When using FPN, the RoIs must be distributed over different FPN levels according the level assignment heuristic (see: modeling.FPN. map_rois_to_fpn_levels). """ lvl_min = cfg.FPN.ROI_MIN_LEVEL lvl_max = cfg.FPN.ROI_MAX_LEVEL def _distribute_rois_over_fpn_levels(rois_blob_name): """Distribute rois over the different FPN levels.""" target_lvls = fpn.map_rois_to_fpn_levels(blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min, lvl_max) <DeepExtract> target_lvls = fpn.map_rois_to_fpn_levels(blobs['rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'rois', blobs['rois'], target_lvls, lvl_min, lvl_max) </DeepExtract> if cfg.MODEL.MASK_ON: <DeepExtract> target_lvls = fpn.map_rois_to_fpn_levels(blobs['mask_rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'mask_rois', blobs['mask_rois'], target_lvls, lvl_min, lvl_max) </DeepExtract> if cfg.MODEL.KEYPOINTS_ON: <DeepExtract> target_lvls = fpn.map_rois_to_fpn_levels(blobs['keypoint_rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'keypoint_rois', blobs['keypoint_rois'], target_lvls, lvl_min, lvl_max) </DeepExtract>
def _add_multilevel_rois(blobs): """By default training RoIs are added for a single feature map level only. When using FPN, the RoIs must be distributed over different FPN levels according the level assignment heuristic (see: modeling.FPN. map_rois_to_fpn_levels). """ lvl_min = cfg.FPN.ROI_MIN_LEVEL lvl_max = cfg.FPN.ROI_MAX_LEVEL def _distribute_rois_over_fpn_levels(rois_blob_name): """Distribute rois over the different FPN levels.""" target_lvls = fpn.map_rois_to_fpn_levels(blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min, lvl_max) target_lvls = fpn.map_rois_to_fpn_levels(blobs['rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'rois', blobs['rois'], target_lvls, lvl_min, lvl_max) if cfg.MODEL.MASK_ON: target_lvls = fpn.map_rois_to_fpn_levels(blobs['mask_rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'mask_rois', blobs['mask_rois'], target_lvls, lvl_min, lvl_max) if cfg.MODEL.KEYPOINTS_ON: target_lvls = fpn.map_rois_to_fpn_levels(blobs['keypoint_rois'][:, 1:5], lvl_min, lvl_max) fpn.add_multilevel_roi_blobs(blobs, 'keypoint_rois', blobs['keypoint_rois'], target_lvls, lvl_min, lvl_max)
Detectron-DA-Faster-RCNN
positive
def create_app_sphinx(): app = CustomFlask(__name__) <DeepExtract> from critiquebrainz.ws.oauth.views import oauth_bp from critiquebrainz.ws.review.views import review_bp from critiquebrainz.ws.user.views import user_bp from critiquebrainz.ws.review.bulk import bulk_review_bp app.register_blueprint(oauth_bp, url_prefix='/oauth') app.register_blueprint(review_bp, url_prefix='/review') app.register_blueprint(user_bp, url_prefix='/user') app.register_blueprint(bulk_review_bp, url_prefix='/reviews') </DeepExtract> return app
def create_app_sphinx(): app = CustomFlask(__name__) from critiquebrainz.ws.oauth.views import oauth_bp from critiquebrainz.ws.review.views import review_bp from critiquebrainz.ws.user.views import user_bp from critiquebrainz.ws.review.bulk import bulk_review_bp app.register_blueprint(oauth_bp, url_prefix='/oauth') app.register_blueprint(review_bp, url_prefix='/review') app.register_blueprint(user_bp, url_prefix='/user') app.register_blueprint(bulk_review_bp, url_prefix='/reviews') return app
critiquebrainz
positive
def setUp(self): super().setUp() <DeepExtract> app = Application.objects.create(name='app foo_user 1', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_1 = app </DeepExtract> <DeepExtract> app = Application.objects.create(name='app foo_user 2', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_2 = app </DeepExtract> <DeepExtract> app = Application.objects.create(name='app foo_user 3', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_3 = app </DeepExtract> <DeepExtract> app = Application.objects.create(name='app bar_user 1', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.bar_user) self.app_bar_1 = app </DeepExtract> <DeepExtract> app = Application.objects.create(name='app bar_user 2', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.bar_user) self.app_bar_2 = app </DeepExtract>
def setUp(self): super().setUp() app = Application.objects.create(name='app foo_user 1', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_1 = app app = Application.objects.create(name='app foo_user 2', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_2 = app app = Application.objects.create(name='app foo_user 3', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.foo_user) self.app_foo_3 = app app = Application.objects.create(name='app bar_user 1', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.bar_user) self.app_bar_1 = app app = Application.objects.create(name='app bar_user 2', redirect_uris='http://example.com', client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, user=self.bar_user) self.app_bar_2 = app
django-oauth-toolkit
positive
def get_matches_and_confidence(self, source_img, target_img, scaling=1.0 / 4.0, confident_mask_type='cyclic_consistency_error_below_3', min_number_of_pts=200): """ Computes matches and corresponding confidence value. Confidence value is obtained with forward-backward cyclic consistency. Args: source_img: torch tensor, bx3xHxW in range [0, 255], not normalized yet target_img: torch tensor, bx3xHxW in range [0, 255], not normalized yet scaling: float, scaling factor applied to target image shape, to obtain the outputted flow field dimensions, where the matches are extracted confident_mask_type: default is 'proba_interval_1_above_10' for PDCNet. See inference_utils/estimate_mask for more details min_number_of_pts: below that number, we discard the retrieved matches (little blobs in cyclic consistency mask) Returns: dict with keys 'kp_source', 'kp_target', 'confidence_value', 'flow' and 'mask' flow and mask are torch tensors """ <DeepExtract> w_scale = target_img.shape[3] h_scale = target_img.shape[2] if output_shape is None and scaling != 1.0: output_shape = (int(h_scale * scaling), int(w_scale * scaling)) (source_img, target_img, ratio_x, ratio_y) = self.pre_process_data(source_img, target_img) output = self.forward(im_reference=target_img, im_query=source_img) flow_est_list = output['flow_estimates'] flow_est = self.div * flow_est_list[-1] if output_shape is not None: ratio_x *= float(output_shape[1]) / float(w_scale) ratio_y *= float(output_shape[0]) / float(h_scale) else: output_shape = (h_scale, w_scale) flow_est = torch.nn.functional.interpolate(input=flow_est, size=output_shape, mode='bilinear', align_corners=False) flow_est[:, 0, :, :] *= ratio_x flow_est[:, 1, :, :] *= ratio_y output_backward = self.forward(source_img, target_img) flow_est_backward = self.div * output_backward['flow_estimates'][-1] flow_est_backward = torch.nn.functional.interpolate(input=flow_est_backward, size=output_shape, mode='bilinear', align_corners=False) flow_est_backward[:, 0, :, :] *= ratio_x flow_est_backward[:, 1, :, :] *= ratio_y cyclic_consistency_error = torch.norm(flow_est + self.warp(flow_est_backward, flow_est), dim=1) uncertainty_est = {'cyclic_consistency_error': cyclic_consistency_error, 'inv_cyclic_consistency_error': 1.0 / (1.0 + cyclic_consistency_error)} if mode == 'channel_first': (flow_estimated, uncertainty_est) = (flow_est, uncertainty_est) else: (flow_estimated, uncertainty_est) = (flow_est.permute(0, 2, 3, 1), uncertainty_est) </DeepExtract> mask = estimate_mask(confident_mask_type, uncertainty_est, list_item=-1) mapping_estimated = convert_flow_to_mapping(flow_estimated) mask = mask & mapping_estimated[:, 0].ge(0) & mapping_estimated[:, 1].ge(0) & mapping_estimated[:, 0].le(source_img.shape[-1] // scaling - 1) & mapping_estimated[:, 1].le(source_img.shape[-2] // scaling - 1) scaling_kp = np.float32(target_img.shape[-2:]) / np.float32(flow_estimated.shape[-2:]) (mkpts_s, mkpts_t) = matches_from_flow(flow_estimated, mask, scaling=scaling_kp[::-1]) confidence_values = uncertainty_est['inv_cyclic_consistency_error'].squeeze()[mask.squeeze()].cpu().numpy() sort_index = np.argsort(np.array(confidence_values)).tolist()[::-1] confidence_values = np.array(confidence_values)[sort_index] mkpts_s = np.array(mkpts_s)[sort_index] mkpts_t = np.array(mkpts_t)[sort_index] if len(mkpts_s) < min_number_of_pts: mkpts_s = np.empty([0, 2], dtype=np.float32) mkpts_t = np.empty([0, 2], dtype=np.float32) confidence_values = np.empty([0], dtype=np.float32) pred = {'kp_source': mkpts_s, 'kp_target': mkpts_t, 
'confidence_value': confidence_values, 'flow': self.resize_and_rescale_flow(flow_estimated, target_img.shape[-2:]), 'mask': F.interpolate(input=mask.unsqueeze(1).float(), size=target_img.shape[-2:], mode='bilinear', align_corners=False).squeeze(1)} return pred
def get_matches_and_confidence(self, source_img, target_img, scaling=1.0 / 4.0, confident_mask_type='cyclic_consistency_error_below_3', min_number_of_pts=200): """ Computes matches and corresponding confidence value. Confidence value is obtained with forward-backward cyclic consistency. Args: source_img: torch tensor, bx3xHxW in range [0, 255], not normalized yet target_img: torch tensor, bx3xHxW in range [0, 255], not normalized yet scaling: float, scaling factor applied to target image shape, to obtain the outputted flow field dimensions, where the matches are extracted confident_mask_type: default is 'proba_interval_1_above_10' for PDCNet. See inference_utils/estimate_mask for more details min_number_of_pts: below that number, we discard the retrieved matches (little blobs in cyclic consistency mask) Returns: dict with keys 'kp_source', 'kp_target', 'confidence_value', 'flow' and 'mask' flow and mask are torch tensors """ w_scale = target_img.shape[3] h_scale = target_img.shape[2] if output_shape is None and scaling != 1.0: output_shape = (int(h_scale * scaling), int(w_scale * scaling)) (source_img, target_img, ratio_x, ratio_y) = self.pre_process_data(source_img, target_img) output = self.forward(im_reference=target_img, im_query=source_img) flow_est_list = output['flow_estimates'] flow_est = self.div * flow_est_list[-1] if output_shape is not None: ratio_x *= float(output_shape[1]) / float(w_scale) ratio_y *= float(output_shape[0]) / float(h_scale) else: output_shape = (h_scale, w_scale) flow_est = torch.nn.functional.interpolate(input=flow_est, size=output_shape, mode='bilinear', align_corners=False) flow_est[:, 0, :, :] *= ratio_x flow_est[:, 1, :, :] *= ratio_y output_backward = self.forward(source_img, target_img) flow_est_backward = self.div * output_backward['flow_estimates'][-1] flow_est_backward = torch.nn.functional.interpolate(input=flow_est_backward, size=output_shape, mode='bilinear', align_corners=False) flow_est_backward[:, 0, :, :] *= ratio_x flow_est_backward[:, 1, :, :] *= ratio_y cyclic_consistency_error = torch.norm(flow_est + self.warp(flow_est_backward, flow_est), dim=1) uncertainty_est = {'cyclic_consistency_error': cyclic_consistency_error, 'inv_cyclic_consistency_error': 1.0 / (1.0 + cyclic_consistency_error)} if mode == 'channel_first': (flow_estimated, uncertainty_est) = (flow_est, uncertainty_est) else: (flow_estimated, uncertainty_est) = (flow_est.permute(0, 2, 3, 1), uncertainty_est) mask = estimate_mask(confident_mask_type, uncertainty_est, list_item=-1) mapping_estimated = convert_flow_to_mapping(flow_estimated) mask = mask & mapping_estimated[:, 0].ge(0) & mapping_estimated[:, 1].ge(0) & mapping_estimated[:, 0].le(source_img.shape[-1] // scaling - 1) & mapping_estimated[:, 1].le(source_img.shape[-2] // scaling - 1) scaling_kp = np.float32(target_img.shape[-2:]) / np.float32(flow_estimated.shape[-2:]) (mkpts_s, mkpts_t) = matches_from_flow(flow_estimated, mask, scaling=scaling_kp[::-1]) confidence_values = uncertainty_est['inv_cyclic_consistency_error'].squeeze()[mask.squeeze()].cpu().numpy() sort_index = np.argsort(np.array(confidence_values)).tolist()[::-1] confidence_values = np.array(confidence_values)[sort_index] mkpts_s = np.array(mkpts_s)[sort_index] mkpts_t = np.array(mkpts_t)[sort_index] if len(mkpts_s) < min_number_of_pts: mkpts_s = np.empty([0, 2], dtype=np.float32) mkpts_t = np.empty([0, 2], dtype=np.float32) confidence_values = np.empty([0], dtype=np.float32) pred = {'kp_source': mkpts_s, 'kp_target': mkpts_t, 'confidence_value': 
confidence_values, 'flow': self.resize_and_rescale_flow(flow_estimated, target_img.shape[-2:]), 'mask': F.interpolate(input=mask.unsqueeze(1).float(), size=target_img.shape[-2:], mode='bilinear', align_corners=False).squeeze(1)} return pred
DenseMatching
positive
def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): """ Check if the elastic IP is currently associated with the device """ <DeepExtract> filters = [] kwargs = {} if address['PublicIp']: kwargs['PublicIps'] = [address['PublicIp']] elif device_id: if is_instance: filters.append({'Name': 'instance-id', 'Values': [device_id]}) else: filters.append({'Name': 'network-interface-id', 'Values': [device_id]}) if len(filters) > 0: kwargs['Filters'] = filters elif len(filters) == 0 and address['PublicIp'] is None: address = None try: addresses = ec2.describe_addresses(**kwargs) except is_boto3_error_code('InvalidAddress.NotFound') as e: if module.params.get('state') == 'absent': module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") addresses = addresses['Addresses'] if len(addresses) == 1: address = addresses[0] elif len(addresses) > 1: msg = 'Found more than one address using args {0}'.format(kwargs) msg += 'Addresses found: {0}'.format(addresses) module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) </DeepExtract> if address: if is_instance: if 'InstanceId' in address and address['InstanceId'] == device_id: return address elif 'NetworkInterfaceId' in address and address['NetworkInterfaceId'] == device_id: return address return False
def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): """ Check if the elastic IP is currently associated with the device """ filters = [] kwargs = {} if address['PublicIp']: kwargs['PublicIps'] = [address['PublicIp']] elif device_id: if is_instance: filters.append({'Name': 'instance-id', 'Values': [device_id]}) else: filters.append({'Name': 'network-interface-id', 'Values': [device_id]}) if len(filters) > 0: kwargs['Filters'] = filters elif len(filters) == 0 and address['PublicIp'] is None: address = None try: addresses = ec2.describe_addresses(**kwargs) except is_boto3_error_code('InvalidAddress.NotFound') as e: if module.params.get('state') == 'absent': module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") addresses = addresses['Addresses'] if len(addresses) == 1: address = addresses[0] elif len(addresses) > 1: msg = 'Found more than one address using args {0}'.format(kwargs) msg += 'Addresses found: {0}'.format(addresses) module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) if address: if is_instance: if 'InstanceId' in address and address['InstanceId'] == device_id: return address elif 'NetworkInterfaceId' in address and address['NetworkInterfaceId'] == device_id: return address return False
amazon.aws
positive
def generate_STS(self, lines): ts = TS('Additional system') init = TRUE() trans = TRUE() invar = TRUE() states = {} assigns = set([]) varsmap = {} def def_var(name, vtype): if name in varsmap: return varsmap[name] var = Symbol(name, vtype) ts.add_state_var(var) return var for line in lines: if line.comment: continue if line.init: if T_I not in states: states[T_I] = TRUE() if self.pyparsing_version == PYPARSING_220: init_varname = line.init.varname init_value = line.init.value else: dline = dict(line) init_varname = dline[P_VARNAME] if P_VARNAME in dline else '' init_value = dline[P_VALUE] if init_varname != '': <DeepExtract> if init_value == T_FALSE: (init_value, typev) = (FALSE(), BOOL) if init_value == T_TRUE: (init_value, typev) = (TRUE(), BOOL) if T_US in init_value: width = int(init_value.split(T_US)[1]) init_value = int(init_value.split(T_US)[0]) else: width = len(init_value) * 4 init_value = int(('0x%s' % init_value).lower(), 0) (init_value, typev) = (BV(init_value, width), BVType(width)) </DeepExtract> <DeepExtract> if init_varname in varsmap: ivar = varsmap[init_varname] var = Symbol(init_varname, typev) ts.add_state_var(var) ivar = var </DeepExtract> state = EqualsOrIff(ivar, value) else: state = TRUE() if init_value == T_TRUE else FALSE() states[T_I] = And(states[T_I], state) init = And(init, state) state = TRUE() if line.state: if self.pyparsing_version == PYPARSING_220: sname = T_S + line.state.id state_varname = line.state.varname state_value = line.state.value else: dline = dict(line) sname = T_S + dline[P_ID] state_varname = dline[P_VARNAME] if P_VARNAME in dline else '' state_value = dline[P_VALUE] if state_varname != '': <DeepExtract> if state_value == T_FALSE: (state_value, typev) = (FALSE(), BOOL) if state_value == T_TRUE: (state_value, typev) = (TRUE(), BOOL) if T_US in state_value: width = int(state_value.split(T_US)[1]) state_value = int(state_value.split(T_US)[0]) else: width = len(state_value) * 4 state_value = int(('0x%s' % state_value).lower(), 0) (state_value, typev) = (BV(state_value, width), BVType(width)) </DeepExtract> <DeepExtract> if state_varname in varsmap: ivar = varsmap[state_varname] var = Symbol(state_varname, typev) ts.add_state_var(var) ivar = var </DeepExtract> state = EqualsOrIff(ivar, value) assval = (sname, state_varname) if assval not in assigns: assigns.add(assval) else: Logger.error('Double assignment for variable "%s" at state "%s"' % (state_varname, sname)) else: state = TRUE() if state_value == T_TRUE else FALSE() if sname not in states: states[sname] = TRUE() states[sname] = And(states[sname], state) stateid_width = math.ceil(math.log(len(states)) / math.log(2)) stateid_var = Symbol(self.new_state_id(), BVType(stateid_width)) init = And(init, EqualsOrIff(stateid_var, BV(0, stateid_width))) invar = And(invar, Implies(EqualsOrIff(stateid_var, BV(0, stateid_width)), states[T_I])) states[T_I] = EqualsOrIff(stateid_var, BV(0, stateid_width)) count = 1 state_items = list(states.keys()) state_items.sort() for state in state_items: if state == T_I: continue invar = And(invar, Implies(EqualsOrIff(stateid_var, BV(count, stateid_width)), states[state])) states[state] = EqualsOrIff(stateid_var, BV(count, stateid_width)) count += 1 transdic = {} for line in lines: if line.comment: continue if line.trans: if self.pyparsing_version == PYPARSING_220: line_start = line.trans.start line_end = line.trans.end else: line_start = dict(line)[P_START] line_end = dict(line)[P_END] if states[line_start] not in transdic: transdic[states[line_start]] = [] 
transdic[states[line_start]].append(states[line_end]) for transition in transdic: (start, end) = (transition, transdic[transition]) trans = And(trans, Implies(start, TS.to_next(Or(end)))) vars_ = [v for v in get_free_variables(trans) if not TS.is_prime(v)] vars_ += get_free_variables(init) vars_ += get_free_variables(invar) invar = And(invar, BVULE(stateid_var, BV(count - 1, stateid_width))) ts.set_behavior(init, trans, invar) ts.add_state_var(stateid_var) hts = HTS('ETS') hts.add_ts(ts) invar_props = [] ltl_props = [] return (hts, invar_props, ltl_props)
def generate_STS(self, lines): ts = TS('Additional system') init = TRUE() trans = TRUE() invar = TRUE() states = {} assigns = set([]) varsmap = {} def def_var(name, vtype): if name in varsmap: return varsmap[name] var = Symbol(name, vtype) ts.add_state_var(var) return var for line in lines: if line.comment: continue if line.init: if T_I not in states: states[T_I] = TRUE() if self.pyparsing_version == PYPARSING_220: init_varname = line.init.varname init_value = line.init.value else: dline = dict(line) init_varname = dline[P_VARNAME] if P_VARNAME in dline else '' init_value = dline[P_VALUE] if init_varname != '': if init_value == T_FALSE: (init_value, typev) = (FALSE(), BOOL) if init_value == T_TRUE: (init_value, typev) = (TRUE(), BOOL) if T_US in init_value: width = int(init_value.split(T_US)[1]) init_value = int(init_value.split(T_US)[0]) else: width = len(init_value) * 4 init_value = int(('0x%s' % init_value).lower(), 0) (init_value, typev) = (BV(init_value, width), BVType(width)) if init_varname in varsmap: ivar = varsmap[init_varname] var = Symbol(init_varname, typev) ts.add_state_var(var) ivar = var state = EqualsOrIff(ivar, value) else: state = TRUE() if init_value == T_TRUE else FALSE() states[T_I] = And(states[T_I], state) init = And(init, state) state = TRUE() if line.state: if self.pyparsing_version == PYPARSING_220: sname = T_S + line.state.id state_varname = line.state.varname state_value = line.state.value else: dline = dict(line) sname = T_S + dline[P_ID] state_varname = dline[P_VARNAME] if P_VARNAME in dline else '' state_value = dline[P_VALUE] if state_varname != '': if state_value == T_FALSE: (state_value, typev) = (FALSE(), BOOL) if state_value == T_TRUE: (state_value, typev) = (TRUE(), BOOL) if T_US in state_value: width = int(state_value.split(T_US)[1]) state_value = int(state_value.split(T_US)[0]) else: width = len(state_value) * 4 state_value = int(('0x%s' % state_value).lower(), 0) (state_value, typev) = (BV(state_value, width), BVType(width)) if state_varname in varsmap: ivar = varsmap[state_varname] var = Symbol(state_varname, typev) ts.add_state_var(var) ivar = var state = EqualsOrIff(ivar, value) assval = (sname, state_varname) if assval not in assigns: assigns.add(assval) else: Logger.error('Double assignment for variable "%s" at state "%s"' % (state_varname, sname)) else: state = TRUE() if state_value == T_TRUE else FALSE() if sname not in states: states[sname] = TRUE() states[sname] = And(states[sname], state) stateid_width = math.ceil(math.log(len(states)) / math.log(2)) stateid_var = Symbol(self.new_state_id(), BVType(stateid_width)) init = And(init, EqualsOrIff(stateid_var, BV(0, stateid_width))) invar = And(invar, Implies(EqualsOrIff(stateid_var, BV(0, stateid_width)), states[T_I])) states[T_I] = EqualsOrIff(stateid_var, BV(0, stateid_width)) count = 1 state_items = list(states.keys()) state_items.sort() for state in state_items: if state == T_I: continue invar = And(invar, Implies(EqualsOrIff(stateid_var, BV(count, stateid_width)), states[state])) states[state] = EqualsOrIff(stateid_var, BV(count, stateid_width)) count += 1 transdic = {} for line in lines: if line.comment: continue if line.trans: if self.pyparsing_version == PYPARSING_220: line_start = line.trans.start line_end = line.trans.end else: line_start = dict(line)[P_START] line_end = dict(line)[P_END] if states[line_start] not in transdic: transdic[states[line_start]] = [] transdic[states[line_start]].append(states[line_end]) for transition in transdic: (start, end) = (transition, 
transdic[transition]) trans = And(trans, Implies(start, TS.to_next(Or(end)))) vars_ = [v for v in get_free_variables(trans) if not TS.is_prime(v)] vars_ += get_free_variables(init) vars_ += get_free_variables(invar) invar = And(invar, BVULE(stateid_var, BV(count - 1, stateid_width))) ts.set_behavior(init, trans, invar) ts.add_state_var(stateid_var) hts = HTS('ETS') hts.add_ts(ts) invar_props = [] ltl_props = [] return (hts, invar_props, ltl_props)
CoSA
positive
def build_content_sim_relation_text_lsa(network, signatures): def get_nid_gen(signatures): for (nid, sig) in signatures: yield nid docs = [] for (nid, e) in signatures: docs.append(' '.join(e)) tfidf = da.get_tfidf_docs(docs) print('TF-IDF shape before LSA: ' + str(tfidf.shape)) st = time.time() <DeepExtract> svd = TruncatedSVD(n_components=1000, random_state=42) svd.fit(tfidf) new_tfidf_vectors = svd.transform(tfidf) tfidf = new_tfidf_vectors </DeepExtract> et = time.time() print('TF-IDF shape after LSA: ' + str(tfidf.shape)) print('Time to compute LSA: {0}'.format(str(et - st))) lsh_projections = RandomBinaryProjections('default', 10000) <DeepExtract> for (nid, sig) in signatures: yield nid </DeepExtract> <DeepExtract> num_features = tfidf.shape[1] print('TF-IDF shape: ' + str(tfidf.shape)) text_engine = Engine(num_features, lshashes=[lsh_projections], distance=CosineDistance()) st = time.time() row_idx = 0 for key in nid_gen: if True: dense_row = tfidf[row_idx] array = dense_row else: sparse_row = tfidf.getrow(row_idx) dense_row = sparse_row.todense() array = dense_row.A[0] row_idx += 1 text_engine.store_vector(array, key) et = time.time() print('Total index text: ' + str(et - st)) text_engine = text_engine </DeepExtract> <DeepExtract> for (nid, sig) in signatures: yield nid </DeepExtract> <DeepExtract> st = time.time() row_idx = 0 for nid in nid_gen: if True: dense_row = tfidf[row_idx] array = dense_row else: sparse_row = tfidf.getrow(row_idx) dense_row = sparse_row.todense() array = dense_row.A[0] row_idx += 1 N = text_engine.neighbours(array) if len(N) > 1: for n in N: (data, key, value) = n if nid != key: network.add_relation(nid, key, Relation.CONTENT_SIM, value) et = time.time() print('Create graph schema: {0}'.format(str(et - st))) </DeepExtract>
def build_content_sim_relation_text_lsa(network, signatures): def get_nid_gen(signatures): for (nid, sig) in signatures: yield nid docs = [] for (nid, e) in signatures: docs.append(' '.join(e)) tfidf = da.get_tfidf_docs(docs) print('TF-IDF shape before LSA: ' + str(tfidf.shape)) st = time.time() svd = TruncatedSVD(n_components=1000, random_state=42) svd.fit(tfidf) new_tfidf_vectors = svd.transform(tfidf) tfidf = new_tfidf_vectors et = time.time() print('TF-IDF shape after LSA: ' + str(tfidf.shape)) print('Time to compute LSA: {0}'.format(str(et - st))) lsh_projections = RandomBinaryProjections('default', 10000) for (nid, sig) in signatures: yield nid num_features = tfidf.shape[1] print('TF-IDF shape: ' + str(tfidf.shape)) text_engine = Engine(num_features, lshashes=[lsh_projections], distance=CosineDistance()) st = time.time() row_idx = 0 for key in nid_gen: if True: dense_row = tfidf[row_idx] array = dense_row else: sparse_row = tfidf.getrow(row_idx) dense_row = sparse_row.todense() array = dense_row.A[0] row_idx += 1 text_engine.store_vector(array, key) et = time.time() print('Total index text: ' + str(et - st)) text_engine = text_engine for (nid, sig) in signatures: yield nid st = time.time() row_idx = 0 for nid in nid_gen: if True: dense_row = tfidf[row_idx] array = dense_row else: sparse_row = tfidf.getrow(row_idx) dense_row = sparse_row.todense() array = dense_row.A[0] row_idx += 1 N = text_engine.neighbours(array) if len(N) > 1: for n in N: (data, key, value) = n if nid != key: network.add_relation(nid, key, Relation.CONTENT_SIM, value) et = time.time() print('Create graph schema: {0}'.format(str(et - st)))
aurum-datadiscovery
positive
def con_sty_loss(self, real, anime, fake): real_feature_map = self.pretrained(real) fake_feature_map = self.pretrained(fake) anime_feature_map = self.pretrained(anime) c_loss = self.criterionL1(real_feature_map, fake_feature_map) <DeepExtract> s_loss = self.criterionL1(self.gram(anime_feature_map), self.gram(fake_feature_map)) </DeepExtract> return (c_loss, s_loss)
def con_sty_loss(self, real, anime, fake): real_feature_map = self.pretrained(real) fake_feature_map = self.pretrained(fake) anime_feature_map = self.pretrained(anime) c_loss = self.criterionL1(real_feature_map, fake_feature_map) s_loss = self.criterionL1(self.gram(anime_feature_map), self.gram(fake_feature_map)) return (c_loss, s_loss)
-AI-emmmm
positive
@force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generators) device = cls_scores[0].device <DeepExtract> num_imgs = len(img_metas) num_levels = len(featmap_sizes) multi_level_anchors = [] for i in range(num_levels): anchors = self.anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i], device=device) multi_level_anchors.append(anchors) anchor_list = [multi_level_anchors for _ in range(num_imgs)] valid_flag_list = [] for (img_id, img_meta) in enumerate(img_metas): multi_level_flags = [] for i in range(num_levels): anchor_stride = self.anchor_strides[i] (feat_h, feat_w) = featmap_sizes[i] (h, w) = img_meta['pad_shape'][:2] valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) flags = self.anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) valid_flag_list.append(multi_level_flags) (anchor_list, valid_flag_list) = (anchor_list, valid_flag_list) </DeepExtract> label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = anchor_target(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = num_total_pos + num_total_neg if self.sampling else num_total_pos (losses_cls, losses_bbox) = multi_apply(self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples, cfg=cfg) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generators) device = cls_scores[0].device num_imgs = len(img_metas) num_levels = len(featmap_sizes) multi_level_anchors = [] for i in range(num_levels): anchors = self.anchor_generators[i].grid_anchors(featmap_sizes[i], self.anchor_strides[i], device=device) multi_level_anchors.append(anchors) anchor_list = [multi_level_anchors for _ in range(num_imgs)] valid_flag_list = [] for (img_id, img_meta) in enumerate(img_metas): multi_level_flags = [] for i in range(num_levels): anchor_stride = self.anchor_strides[i] (feat_h, feat_w) = featmap_sizes[i] (h, w) = img_meta['pad_shape'][:2] valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) flags = self.anchor_generators[i].valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) valid_flag_list.append(multi_level_flags) (anchor_list, valid_flag_list) = (anchor_list, valid_flag_list) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = anchor_target(anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = num_total_pos + num_total_neg if self.sampling else num_total_pos (losses_cls, losses_bbox) = multi_apply(self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples, cfg=cfg) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
ATSS-EfficientDet-PyTorch
positive
def initialize(self, params): if len(params) != 1: return False <DeepExtract> try: decoded_hash = bytes.fromhex(params[0]) except ValueError: self._tx_hash = None if len(decoded_hash) != 32: self._tx_hash = None self._tx_hash = decoded_hash </DeepExtract> if self._tx_hash is None: return False return True
def initialize(self, params): if len(params) != 1: return False try: decoded_hash = bytes.fromhex(params[0]) except ValueError: self._tx_hash = None if len(decoded_hash) != 32: self._tx_hash = None self._tx_hash = decoded_hash if self._tx_hash is None: return False return True
DarkWallet
positive
def symlink(src, dst): """Like os.symlink, but overwrites dst and logs""" <DeepExtract> if self.logger is not None: self.logger.debug("linking: '%s' -> '%s'" % (self.relpath(dst), self.relpath(src))) </DeepExtract> if os.path.lexists(dst): os.remove(dst) os.symlink(src, dst)
def symlink(src, dst): """Like os.symlink, but overwrites dst and logs""" if self.logger is not None: self.logger.debug("linking: '%s' -> '%s'" % (self.relpath(dst), self.relpath(src))) if os.path.lexists(dst): os.remove(dst) os.symlink(src, dst)
Benchmarks
positive
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): lookahead = None lookaheadstack = [] actions = self.action goto = self.goto prod = self.productions defaulted_states = self.defaulted_states pslice = YaccProduction(None) errorcount = 0 if not lexer: from . import lex lexer = lex.lexer pslice.lexer = lexer pslice.parser = self if input is not None: lexer.input(input) if tokenfunc is None: get_token = lexer.token else: get_token = tokenfunc self.token = get_token statestack = [] self.statestack = statestack symstack = [] self.symstack = symstack pslice.stack = symstack errtoken = None statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: statestack.append(t) state = t symstack.append(lookahead) lookahead = None if errorcount: errorcount -= 1 continue if t < 0: p = prod[-t] pname = p.name plen = p.len sym = YaccSymbol() sym.type = pname sym.value = None if plen: targ = symstack[-plen - 1:] targ[0] = sym if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos sym.filename = getattr(t1, 'filename', '') t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) pslice.slice = targ try: del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: lookaheadstack.append(lookahead) symstack.extend(targ[1:-1]) statestack.pop() state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue else: if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos sym.filename = getattr(lexer, 'filename', '') targ = [sym] pslice.slice = targ try: self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: lookaheadstack.append(lookahead) statestack.pop() state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None if self.errorfunc: if errtoken and (not hasattr(errtoken, 'lexer')): errtoken.lexer = lexer self.state = state <DeepExtract> global _errok, _token, _restart _errok = self.errok _token = self.token _restart = self.restart r = self.errorfunc(errtoken) try: del _errok, _token, _restart except NameError: pass tok = r </DeepExtract> if self.errorok: lookahead = tok errtoken = None continue elif errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. 
EOF\n') return else: errorcount = error_count if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 del lookaheadstack[:] continue if lookahead.type == '$end': return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) lookahead = None continue t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos statestack.pop() state = statestack[-1] continue raise RuntimeError('yacc: internal parser error!!!\n')
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): lookahead = None lookaheadstack = [] actions = self.action goto = self.goto prod = self.productions defaulted_states = self.defaulted_states pslice = YaccProduction(None) errorcount = 0 if not lexer: from . import lex lexer = lex.lexer pslice.lexer = lexer pslice.parser = self if input is not None: lexer.input(input) if tokenfunc is None: get_token = lexer.token else: get_token = tokenfunc self.token = get_token statestack = [] self.statestack = statestack symstack = [] self.symstack = symstack pslice.stack = symstack errtoken = None statestack.append(0) sym = YaccSymbol() sym.type = '$end' symstack.append(sym) state = 0 while True: if state not in defaulted_states: if not lookahead: if not lookaheadstack: lookahead = get_token() else: lookahead = lookaheadstack.pop() if not lookahead: lookahead = YaccSymbol() lookahead.type = '$end' ltype = lookahead.type t = actions[state].get(ltype) else: t = defaulted_states[state] if t is not None: if t > 0: statestack.append(t) state = t symstack.append(lookahead) lookahead = None if errorcount: errorcount -= 1 continue if t < 0: p = prod[-t] pname = p.name plen = p.len sym = YaccSymbol() sym.type = pname sym.value = None if plen: targ = symstack[-plen - 1:] targ[0] = sym if tracking: t1 = targ[1] sym.lineno = t1.lineno sym.lexpos = t1.lexpos sym.filename = getattr(t1, 'filename', '') t1 = targ[-1] sym.endlineno = getattr(t1, 'endlineno', t1.lineno) sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) pslice.slice = targ try: del symstack[-plen:] self.state = state p.callable(pslice) del statestack[-plen:] symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: lookaheadstack.append(lookahead) symstack.extend(targ[1:-1]) statestack.pop() state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue else: if tracking: sym.lineno = lexer.lineno sym.lexpos = lexer.lexpos sym.filename = getattr(lexer, 'filename', '') targ = [sym] pslice.slice = targ try: self.state = state p.callable(pslice) symstack.append(sym) state = goto[statestack[-1]][pname] statestack.append(state) except SyntaxError: lookaheadstack.append(lookahead) statestack.pop() state = statestack[-1] sym.type = 'error' sym.value = 'error' lookahead = sym errorcount = error_count self.errorok = False continue if t == 0: n = symstack[-1] result = getattr(n, 'value', None) return result if t is None: if errorcount == 0 or self.errorok: errorcount = error_count self.errorok = False errtoken = lookahead if errtoken.type == '$end': errtoken = None if self.errorfunc: if errtoken and (not hasattr(errtoken, 'lexer')): errtoken.lexer = lexer self.state = state global _errok, _token, _restart _errok = self.errok _token = self.token _restart = self.restart r = self.errorfunc(errtoken) try: del _errok, _token, _restart except NameError: pass tok = r if self.errorok: lookahead = tok errtoken = None continue elif errtoken: if hasattr(errtoken, 'lineno'): lineno = lookahead.lineno else: lineno = 0 if lineno: sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) else: sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) else: sys.stderr.write('yacc: Parse error in input. 
EOF\n') return else: errorcount = error_count if len(statestack) <= 1 and lookahead.type != '$end': lookahead = None errtoken = None state = 0 del lookaheadstack[:] continue if lookahead.type == '$end': return if lookahead.type != 'error': sym = symstack[-1] if sym.type == 'error': if tracking: sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) lookahead = None continue t = YaccSymbol() t.type = 'error' if hasattr(lookahead, 'lineno'): t.lineno = t.endlineno = lookahead.lineno if hasattr(lookahead, 'lexpos'): t.lexpos = t.endlexpos = lookahead.lexpos t.value = lookahead lookaheadstack.append(lookahead) lookahead = t else: sym = symstack.pop() if tracking: lookahead.lineno = sym.lineno lookahead.lexpos = sym.lexpos statestack.pop() state = statestack[-1] continue raise RuntimeError('yacc: internal parser error!!!\n')
ctypesgen
positive
@action('Remove unused uploads') def clean_uploads(): upload_dir = current_app.config['UPLOAD_DIR'] (_, _, filenames) = next(os.walk(upload_dir)) existing_uploads = set(filenames) if len(existing_uploads) != 0: def get_filenames_from_column(column): results = db.session.query(column).filter(column.isnot(None), column != '').all() return set([os.path.basename(x[0]) for x in results]) <DeepExtract> results = db.session.query(PackageRelease.url).filter(PackageRelease.url.isnot(None), PackageRelease.url != '').all() release_urls = set([os.path.basename(x[0]) for x in results]) </DeepExtract> <DeepExtract> results = db.session.query(PackageScreenshot.url).filter(PackageScreenshot.url.isnot(None), PackageScreenshot.url != '').all() screenshot_urls = set([os.path.basename(x[0]) for x in results]) </DeepExtract> db_urls = release_urls.union(screenshot_urls) unreachable = existing_uploads.difference(db_urls) import sys print('On Disk: ', existing_uploads, file=sys.stderr) print('In DB: ', db_urls, file=sys.stderr) print('Unreachable: ', unreachable, file=sys.stderr) for filename in unreachable: os.remove(os.path.join(upload_dir, filename)) flash('Deleted ' + str(len(unreachable)) + ' unreachable uploads', 'success') else: flash('No downloads to create', 'danger') return redirect(url_for('admin.admin_page'))
@action('Remove unused uploads') def clean_uploads(): upload_dir = current_app.config['UPLOAD_DIR'] (_, _, filenames) = next(os.walk(upload_dir)) existing_uploads = set(filenames) if len(existing_uploads) != 0: def get_filenames_from_column(column): results = db.session.query(column).filter(column.isnot(None), column != '').all() return set([os.path.basename(x[0]) for x in results]) results = db.session.query(PackageRelease.url).filter(PackageRelease.url.isnot(None), PackageRelease.url != '').all() release_urls = set([os.path.basename(x[0]) for x in results]) results = db.session.query(PackageScreenshot.url).filter(PackageScreenshot.url.isnot(None), PackageScreenshot.url != '').all() screenshot_urls = set([os.path.basename(x[0]) for x in results]) db_urls = release_urls.union(screenshot_urls) unreachable = existing_uploads.difference(db_urls) import sys print('On Disk: ', existing_uploads, file=sys.stderr) print('In DB: ', db_urls, file=sys.stderr) print('Unreachable: ', unreachable, file=sys.stderr) for filename in unreachable: os.remove(os.path.join(upload_dir, filename)) flash('Deleted ' + str(len(unreachable)) + ' unreachable uploads', 'success') else: flash('No downloads to create', 'danger') return redirect(url_for('admin.admin_page'))
contentdb
positive
def test_update_states_updated_command(self): with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: <DeepExtract> self._setup_containers(rsps, [_container('sub_sub_svc'), _container('sub_svc'), _container('redis'), _container('svc'), _container('server')]) </DeepExtract> self.sample_map.containers['server'].create_options.update(command='/bin/true') <DeepExtract> cd = {} nd = {} vd = {} imd = {} for s in UpdateStateGenerator(self.policy, {}).get_states(self.server_config_id): config_id = s.config_id if config_id.config_type == ItemType.CONTAINER: cd[config_id.config_name, config_id.instance_name] = s elif config_id.config_type == ItemType.VOLUME: vd[config_id.config_name, config_id.instance_name] = s elif config_id.config_type == ItemType.NETWORK: nd[config_id.config_name] = s elif config_id.config_type == ItemType.IMAGE: imd[config_id.config_name, config_id.instance_name] = s else: raise ValueError('Invalid configuration type.', config_id.config_type) states = {'containers': cd, 'volumes': vd, 'networks': nd, 'images': imd} </DeepExtract> server_state = states['containers']['server', None] self.assertEqual(server_state.base_state, State.RUNNING) self.assertEqual(server_state.state_flags & StateFlags.MISC_MISMATCH, StateFlags.MISC_MISMATCH)
def test_update_states_updated_command(self): with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: self._setup_containers(rsps, [_container('sub_sub_svc'), _container('sub_svc'), _container('redis'), _container('svc'), _container('server')]) self.sample_map.containers['server'].create_options.update(command='/bin/true') cd = {} nd = {} vd = {} imd = {} for s in UpdateStateGenerator(self.policy, {}).get_states(self.server_config_id): config_id = s.config_id if config_id.config_type == ItemType.CONTAINER: cd[config_id.config_name, config_id.instance_name] = s elif config_id.config_type == ItemType.VOLUME: vd[config_id.config_name, config_id.instance_name] = s elif config_id.config_type == ItemType.NETWORK: nd[config_id.config_name] = s elif config_id.config_type == ItemType.IMAGE: imd[config_id.config_name, config_id.instance_name] = s else: raise ValueError('Invalid configuration type.', config_id.config_type) states = {'containers': cd, 'volumes': vd, 'networks': nd, 'images': imd} server_state = states['containers']['server', None] self.assertEqual(server_state.base_state, State.RUNNING) self.assertEqual(server_state.state_flags & StateFlags.MISC_MISMATCH, StateFlags.MISC_MISMATCH)
docker-map
positive
def al_access_key_id(generated_snapshot: dict, kwargs={}) -> dict: PASSWORD_KEY_RE = '^(?i)aws_?(access)_?(key)_?(id)_?$' PASSWORD_VALUE_RE = '^(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}' <DeepExtract> output = {} errors = [] try: issue_found = False skipped = True if isinstance(generated_snapshot.get('resources'), list): for resource in generated_snapshot.get('resources'): skipped = False path_list = get_paths(resource) for path in path_list: nested_resource = resource for key in path: nested_resource = nested_resource[key] if isinstance(nested_resource, (int, float, complex, bool)): nested_resource = str(nested_resource) if isinstance(nested_resource, str) and re.match(PASSWORD_VALUE_RE, nested_resource) and (re.match(PASSWORD_KEY_RE, str(key), re.I) if PASSWORD_KEY_RE else True) and (not re.match(EXCLUDE_RE, str(nested_resource)) if EXCLUDE_RE else True): if shannon_entropy_password: (_, normalized_entropy) = shannon_entropy(nested_resource) if normalized_entropy > 0.965: errors.append({'leaked_password_path': 'resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path])}) issue_found = True logger.warning('Leaked Password at:%s\nvalue:%s' % ('resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path]), nested_resource)) else: issue_found = True errors.append({'leaked_password_path': 'resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path])}) logger.warning('Leaked Password at:%s\nvalue:%s' % ('resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path]), nested_resource)) output['issue'] = True if issue_found else False if errors: output['errors'] = errors output['skipped'] = skipped output = output except Exception as ex: print(traceback.format_exc()) output['issue'] = None output['err'] = str(ex) output['skipped'] = skipped output = output </DeepExtract> if output['issue'] == True: output['al_access_key_id_err'] = 'There is a possibility that AWS secret access key has leaked' elif output['issue'] == None: output['al_access_key_id_err'] = output['err'] output.pop('err') else: output['al_access_key_id_err'] = '' return output
def al_access_key_id(generated_snapshot: dict, kwargs={}) -> dict: PASSWORD_KEY_RE = '^(?i)aws_?(access)_?(key)_?(id)_?$' PASSWORD_VALUE_RE = '^(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}' output = {} errors = [] try: issue_found = False skipped = True if isinstance(generated_snapshot.get('resources'), list): for resource in generated_snapshot.get('resources'): skipped = False path_list = get_paths(resource) for path in path_list: nested_resource = resource for key in path: nested_resource = nested_resource[key] if isinstance(nested_resource, (int, float, complex, bool)): nested_resource = str(nested_resource) if isinstance(nested_resource, str) and re.match(PASSWORD_VALUE_RE, nested_resource) and (re.match(PASSWORD_KEY_RE, str(key), re.I) if PASSWORD_KEY_RE else True) and (not re.match(EXCLUDE_RE, str(nested_resource)) if EXCLUDE_RE else True): if shannon_entropy_password: (_, normalized_entropy) = shannon_entropy(nested_resource) if normalized_entropy > 0.965: errors.append({'leaked_password_path': 'resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path])}) issue_found = True logger.warning('Leaked Password at:%s\nvalue:%s' % ('resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path]), nested_resource)) else: issue_found = True errors.append({'leaked_password_path': 'resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path])}) logger.warning('Leaked Password at:%s\nvalue:%s' % ('resources/' + resource.get('type') + '/' + '/'.join([str(path) for path in path]), nested_resource)) output['issue'] = True if issue_found else False if errors: output['errors'] = errors output['skipped'] = skipped output = output except Exception as ex: print(traceback.format_exc()) output['issue'] = None output['err'] = str(ex) output['skipped'] = skipped output = output if output['issue'] == True: output['al_access_key_id_err'] = 'There is a possibility that AWS secret access key has leaked' elif output['issue'] == None: output['al_access_key_id_err'] = output['err'] output.pop('err') else: output['al_access_key_id_err'] = '' return output
cloud-validation-framework
positive
def prepare_config_and_inputs_for_common(self): <DeepExtract> input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RobertaConfig(vocab_size_or_config_json_file=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) config_and_inputs = (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) </DeepExtract> (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return (config, inputs_dict)
def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RobertaConfig(vocab_size_or_config_json_file=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range) config_and_inputs = (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return (config, inputs_dict)
automatic-personality-prediction
positive
def pick_start_pos(self): if self.random_starts: temp = np.random.uniform([self.boundary_min, self.boundary_min + 1.25], [self.boundary_max - 0.4, self.boundary_max], (self.action_dim,)) if not self.is_valid(temp[None, :]): <DeepExtract> if self.random_starts: temp = np.random.uniform([self.boundary_min, self.boundary_min + 1.25], [self.boundary_max - 0.4, self.boundary_max], (self.action_dim,)) if not self.is_valid(temp[None, :]): temp = self.pick_start_pos() else: temp = self.start temp = temp </DeepExtract> else: temp = self.start return temp
def pick_start_pos(self): if self.random_starts: temp = np.random.uniform([self.boundary_min, self.boundary_min + 1.25], [self.boundary_max - 0.4, self.boundary_max], (self.action_dim,)) if not self.is_valid(temp[None, :]): if self.random_starts: temp = np.random.uniform([self.boundary_min, self.boundary_min + 1.25], [self.boundary_max - 0.4, self.boundary_max], (self.action_dim,)) if not self.is_valid(temp[None, :]): temp = self.pick_start_pos() else: temp = self.start temp = temp else: temp = self.start return temp
berkeley-deep-RL-pytorch-starter
positive
def json_renderer(args, data, view_name): """Render a response as JSON""" status_code = 200 json_cols = [] if '_json' in args: json_cols = args.getlist('_json') if json_cols and 'rows' in data and ('columns' in data): <DeepExtract> json_cols = set(json_cols) if not json_cols.intersection(data['columns']): data['rows'] = data['rows'] new_rows = [] for row in data['rows']: new_row = [] for (value, column) in zip(row, data['columns']): if column in json_cols: try: value = json.loads(value) except (TypeError, ValueError) as e: pass new_row.append(value) new_rows.append(new_row) data['rows'] = new_rows </DeepExtract> if 'rows' in data and (not value_as_boolean(args.get('_json_infinity', '0'))): data['rows'] = [remove_infinites(row) for row in data['rows']] shape = args.get('_shape', 'objects') if data.get('error'): shape = 'objects' if shape == 'arrayfirst': if not data['rows']: data = [] elif isinstance(data['rows'][0], sqlite3.Row): data = [row[0] for row in data['rows']] else: assert isinstance(data['rows'][0], dict) data = [next(iter(row.values())) for row in data['rows']] elif shape in ('objects', 'object', 'array'): columns = data.get('columns') rows = data.get('rows') if rows and columns: data['rows'] = [dict(zip(columns, row)) for row in rows] if shape == 'object': error = None if 'primary_keys' not in data: error = '_shape=object is only available on tables' else: pks = data['primary_keys'] if not pks: error = '_shape=object not available for tables with no primary keys' else: object_rows = {} for row in data['rows']: pk_string = path_from_row_pks(row, pks, not pks) object_rows[pk_string] = row data = object_rows if error: data = {'ok': False, 'error': error} elif shape == 'array': data = data['rows'] elif shape == 'arrays': if not data['rows']: pass elif isinstance(data['rows'][0], sqlite3.Row): data['rows'] = [list(row) for row in data['rows']] else: data['rows'] = [list(row.values()) for row in data['rows']] else: status_code = 400 data = {'ok': False, 'error': f'Invalid _shape: {shape}', 'status': 400, 'title': None} nl = args.get('_nl', '') if nl and shape == 'array': body = '\n'.join((json.dumps(item, cls=CustomJSONEncoder) for item in data)) content_type = 'text/plain' else: body = json.dumps(data, cls=CustomJSONEncoder) content_type = 'application/json; charset=utf-8' headers = {} return Response(body, status=status_code, headers=headers, content_type=content_type)
def json_renderer(args, data, view_name): """Render a response as JSON""" status_code = 200 json_cols = [] if '_json' in args: json_cols = args.getlist('_json') if json_cols and 'rows' in data and ('columns' in data): json_cols = set(json_cols) if not json_cols.intersection(data['columns']): data['rows'] = data['rows'] new_rows = [] for row in data['rows']: new_row = [] for (value, column) in zip(row, data['columns']): if column in json_cols: try: value = json.loads(value) except (TypeError, ValueError) as e: pass new_row.append(value) new_rows.append(new_row) data['rows'] = new_rows if 'rows' in data and (not value_as_boolean(args.get('_json_infinity', '0'))): data['rows'] = [remove_infinites(row) for row in data['rows']] shape = args.get('_shape', 'objects') if data.get('error'): shape = 'objects' if shape == 'arrayfirst': if not data['rows']: data = [] elif isinstance(data['rows'][0], sqlite3.Row): data = [row[0] for row in data['rows']] else: assert isinstance(data['rows'][0], dict) data = [next(iter(row.values())) for row in data['rows']] elif shape in ('objects', 'object', 'array'): columns = data.get('columns') rows = data.get('rows') if rows and columns: data['rows'] = [dict(zip(columns, row)) for row in rows] if shape == 'object': error = None if 'primary_keys' not in data: error = '_shape=object is only available on tables' else: pks = data['primary_keys'] if not pks: error = '_shape=object not available for tables with no primary keys' else: object_rows = {} for row in data['rows']: pk_string = path_from_row_pks(row, pks, not pks) object_rows[pk_string] = row data = object_rows if error: data = {'ok': False, 'error': error} elif shape == 'array': data = data['rows'] elif shape == 'arrays': if not data['rows']: pass elif isinstance(data['rows'][0], sqlite3.Row): data['rows'] = [list(row) for row in data['rows']] else: data['rows'] = [list(row.values()) for row in data['rows']] else: status_code = 400 data = {'ok': False, 'error': f'Invalid _shape: {shape}', 'status': 400, 'title': None} nl = args.get('_nl', '') if nl and shape == 'array': body = '\n'.join((json.dumps(item, cls=CustomJSONEncoder) for item in data)) content_type = 'text/plain' else: body = json.dumps(data, cls=CustomJSONEncoder) content_type = 'application/json; charset=utf-8' headers = {} return Response(body, status=status_code, headers=headers, content_type=content_type)
datasette
positive
def intersection_matrix(self, return_rank=False, gamma=1): """ Union matrix between conceptors :param return_rank: Rank of union """ if return_rank: intersection_matrix = torch.zeros(self.count, self.count, dtype=torch.long) else: intersection_matrix = torch.zeros(self.count, self.count) for (i, i_name) in enumerate(self._conceptors.keys()): for (j, j_name) in enumerate(self._conceptors.keys()): E = Conceptor.operator_AND(self._conceptors[i_name], self._conceptors[j_name]) if gamma != 1: E.PHI(gamma) if return_rank: intersection_matrix[i, j] = rank(E.C) else: <DeepExtract> if self.is_null(): intersection_matrix[i, j] = 0.0 else: intersection_matrix[i, j] = self.A().quota </DeepExtract> return intersection_matrix
def intersection_matrix(self, return_rank=False, gamma=1): """ Union matrix between conceptors :param return_rank: Rank of union """ if return_rank: intersection_matrix = torch.zeros(self.count, self.count, dtype=torch.long) else: intersection_matrix = torch.zeros(self.count, self.count) for (i, i_name) in enumerate(self._conceptors.keys()): for (j, j_name) in enumerate(self._conceptors.keys()): E = Conceptor.operator_AND(self._conceptors[i_name], self._conceptors[j_name]) if gamma != 1: E.PHI(gamma) if return_rank: intersection_matrix[i, j] = rank(E.C) else: if self.is_null(): intersection_matrix[i, j] = 0.0 else: intersection_matrix[i, j] = self.A().quota return intersection_matrix
EchoTorch
positive
def sendUpdateDeviceLocationToActionBatchQueue(p_apiKey, p_orgId, p_networkId, p_serial, p_location): body = {'address': p_location, 'moveMapMarker': True} action = {'resource': '/networks/' + p_networkId + '/devices/' + p_serial, 'operation': 'update', 'body': body} <DeepExtract> global ACTION_BATCH_QUEUE if not action is None: ACTION_BATCH_QUEUE.append(action) queueLength = len(ACTION_BATCH_QUEUE) if queueLength == 100 or (queueLength > 0 and p_forceCommit): print('Submitting action batch:') print(ACTION_BATCH_QUEUE) batchId = createActionBatch(p_apiKey, p_orgId, ACTION_BATCH_QUEUE) ACTION_BATCH_QUEUE = [] if not batchId is None: print('Submitted with batchId %s' % batchId) (success, batchId) = (True, str(batchId)) else: (success, batchId) = (False, None) (success, batchId) = (True, None) </DeepExtract> if not success: print('ERROR 16: Failed to queue action batch') return (success, batchId)
def sendUpdateDeviceLocationToActionBatchQueue(p_apiKey, p_orgId, p_networkId, p_serial, p_location): body = {'address': p_location, 'moveMapMarker': True} action = {'resource': '/networks/' + p_networkId + '/devices/' + p_serial, 'operation': 'update', 'body': body} global ACTION_BATCH_QUEUE if not action is None: ACTION_BATCH_QUEUE.append(action) queueLength = len(ACTION_BATCH_QUEUE) if queueLength == 100 or (queueLength > 0 and p_forceCommit): print('Submitting action batch:') print(ACTION_BATCH_QUEUE) batchId = createActionBatch(p_apiKey, p_orgId, ACTION_BATCH_QUEUE) ACTION_BATCH_QUEUE = [] if not batchId is None: print('Submitted with batchId %s' % batchId) (success, batchId) = (True, str(batchId)) else: (success, batchId) = (False, None) (success, batchId) = (True, None) if not success: print('ERROR 16: Failed to queue action batch') return (success, batchId)
automation-scripts
positive
def init_after_subscription_selected(self): self.account_status = 'Loading' maya.refresh() if self.batch_account_row is not None: maya.delete_ui(self.batch_account_row) if self.account_ui_elements: maya.delete_ui(self.account_ui_elements) <DeepExtract> maya.form_layout(self.page, edit=True, enable=False) </DeepExtract> self.account_ui_elements = [] with utils.Row(2, 2, (100, 200), ('left', 'left'), parent=self.batch_account_framelayout) as batch_account_row: self.batch_account_row = batch_account_row maya.text(label='Batch Account: ', align='right') with utils.Dropdown(self.select_account_in_dropdown) as account_dropdown: self._account_dropdown = account_dropdown self._account_dropdown.add_item('') accounts = self.base.available_batch_accounts() self.accounts_by_name = dict([(account.name, account) for account in accounts]) for account in accounts: self._account_dropdown.add_item(account.name) self.account_status = 'Please select Batch Account' <DeepExtract> box_label = 'Plugin Settings' if self.plugin_settings_framelayout is not None: maya.delete_ui(self.plugin_settings_framelayout) with utils.FrameLayout(label=box_label, collapsable=True, width=345, collapse=True, parent=self.frames_layout) as plugin_settings_framelayout: self.plugin_settings_framelayout = plugin_settings_framelayout self.account_ui_elements.append(plugin_settings_framelayout) with utils.Row(2, 2, (100, 200), ('left', 'left'), parent=plugin_settings_framelayout) as threadsRow: self.account_ui_elements.append(threadsRow) maya.text(label='Threads: ', align='right') self._threads = maya.int_field(changeCommand=self.set_threads, annotation='The maximum number of parallel threads to use for uploading of assets', height=25, minValue=1, maxValue=40, enable=True, value=self.base.threads) with utils.Row(2, 2, (100, 200), ('left', 'center'), parent=plugin_settings_framelayout) as loggingRow: self.account_ui_elements.append(loggingRow) maya.text(label='Logging: ', align='right') with utils.Dropdown(self.set_logging) as log_settings: self._logging = log_settings self._logging.add_item('Debug') self._logging.add_item('Info') self._logging.add_item('Warning') self._logging.add_item('Error') </DeepExtract> <DeepExtract> maya.form_layout(self.page, edit=True, enable=True) </DeepExtract> maya.refresh()
def init_after_subscription_selected(self): self.account_status = 'Loading' maya.refresh() if self.batch_account_row is not None: maya.delete_ui(self.batch_account_row) if self.account_ui_elements: maya.delete_ui(self.account_ui_elements) maya.form_layout(self.page, edit=True, enable=False) self.account_ui_elements = [] with utils.Row(2, 2, (100, 200), ('left', 'left'), parent=self.batch_account_framelayout) as batch_account_row: self.batch_account_row = batch_account_row maya.text(label='Batch Account: ', align='right') with utils.Dropdown(self.select_account_in_dropdown) as account_dropdown: self._account_dropdown = account_dropdown self._account_dropdown.add_item('') accounts = self.base.available_batch_accounts() self.accounts_by_name = dict([(account.name, account) for account in accounts]) for account in accounts: self._account_dropdown.add_item(account.name) self.account_status = 'Please select Batch Account' box_label = 'Plugin Settings' if self.plugin_settings_framelayout is not None: maya.delete_ui(self.plugin_settings_framelayout) with utils.FrameLayout(label=box_label, collapsable=True, width=345, collapse=True, parent=self.frames_layout) as plugin_settings_framelayout: self.plugin_settings_framelayout = plugin_settings_framelayout self.account_ui_elements.append(plugin_settings_framelayout) with utils.Row(2, 2, (100, 200), ('left', 'left'), parent=plugin_settings_framelayout) as threadsRow: self.account_ui_elements.append(threadsRow) maya.text(label='Threads: ', align='right') self._threads = maya.int_field(changeCommand=self.set_threads, annotation='The maximum number of parallel threads to use for uploading of assets', height=25, minValue=1, maxValue=40, enable=True, value=self.base.threads) with utils.Row(2, 2, (100, 200), ('left', 'center'), parent=plugin_settings_framelayout) as loggingRow: self.account_ui_elements.append(loggingRow) maya.text(label='Logging: ', align='right') with utils.Dropdown(self.set_logging) as log_settings: self._logging = log_settings self._logging.add_item('Debug') self._logging.add_item('Info') self._logging.add_item('Warning') self._logging.add_item('Error') maya.form_layout(self.page, edit=True, enable=True) maya.refresh()
azure-batch-maya
positive
def test_burn_key_digest1(self): <DeepExtract> full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', 'burn_key_digest -h']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) return output </DeepExtract> <DeepExtract> full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_key_digest {S_IMAGES_DIR}/ecdsa192_secure_boot_signing_key_v2.pem']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) return output </DeepExtract> <DeepExtract> full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', 'summary -d']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) output = output </DeepExtract> assert ' = 1e 3d 15 16 96 ca 7f 22 a6 e8 8b d5 27 a0 3b 3b R/-' in output assert ' = 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 3d 15 16 96 ca 7f 22 a6 e8 8b d5 27 a0 3b 3b R/-' in output
def test_burn_key_digest1(self): full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', 'burn_key_digest -h']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) return output full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', f'burn_key_digest {S_IMAGES_DIR}/ecdsa192_secure_boot_signing_key_v2.pem']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) return output full_cmd = ' '.join([self.base_cmd, '--do-not-confirm' if do_not_confirm else '', 'summary -d']) output = self._run_command(full_cmd, check_msg, ret_code) self._run_command(' '.join([self.base_cmd, 'check_error']), 'No errors detected', 0) print(output) output = output assert ' = 1e 3d 15 16 96 ca 7f 22 a6 e8 8b d5 27 a0 3b 3b R/-' in output assert ' = 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 3d 15 16 96 ca 7f 22 a6 e8 8b d5 27 a0 3b 3b R/-' in output
esptool
positive
@BACKBONE_REGISTRY.register() def resnet18_dynamic_ms_l12(pretrained=True, **kwargs) -> ResNet: <DeepExtract> model = ResNet(BasicBlockDynamic, [2, 2, 2, 2], **kwargs) if pretrained: state_dict = load_state_dict_from_url(model_urls['resnet18_dynamic'], progress=True) removed_keys = model.has_fc is False or (model.has_fc is True and model.out_features != 1000) removed_keys = ['fc.weight', 'fc.bias'] if removed_keys else [] for key in removed_keys: state_dict.pop(key) allowed_missing_keys = removed_keys if model.has_fc else None load_state_dict(model, state_dict, allowed_missing_keys) model = model </DeepExtract> return model
@BACKBONE_REGISTRY.register() def resnet18_dynamic_ms_l12(pretrained=True, **kwargs) -> ResNet: model = ResNet(BasicBlockDynamic, [2, 2, 2, 2], **kwargs) if pretrained: state_dict = load_state_dict_from_url(model_urls['resnet18_dynamic'], progress=True) removed_keys = model.has_fc is False or (model.has_fc is True and model.out_features != 1000) removed_keys = ['fc.weight', 'fc.bias'] if removed_keys else [] for key in removed_keys: state_dict.pop(key) allowed_missing_keys = removed_keys if model.has_fc else None load_state_dict(model, state_dict, allowed_missing_keys) model = model return model
Dassl.pytorch
positive
def get_tensor_max_abs(t, per_dim=None):
    <DeepExtract>
    if per_dim is None:
        (min_val, max_val) = (t.min(), t.max())
    if per_dim >= t.dim():
        raise ValueError('Got per_dim={0}, but tensor only has {1} dimensions', per_dim, t.dim())
    view_dims = [t.shape[i] for i in range(per_dim + 1)] + [-1]
    tv = t.view(*view_dims)
    (min_val, max_val) = (tv.min(dim=-1)[0], tv.max(dim=-1)[0])
    </DeepExtract>
    return torch.max(min_val.abs_(), max_val.abs_())
def get_tensor_max_abs(t, per_dim=None):
    if per_dim is None:
        (min_val, max_val) = (t.min(), t.max())
    if per_dim >= t.dim():
        raise ValueError('Got per_dim={0}, but tensor only has {1} dimensions', per_dim, t.dim())
    view_dims = [t.shape[i] for i in range(per_dim + 1)] + [-1]
    tv = t.view(*view_dims)
    (min_val, max_val) = (tv.min(dim=-1)[0], tv.max(dim=-1)[0])
    return torch.max(min_val.abs_(), max_val.abs_())
EagleEye
positive
def on_setting_changed(self, argument):
    if isinstance(argument, qargparse.Button):
        if argument['name'] == 'resetLayout':
            <DeepExtract>
            self._docks['console'].append('Restoring layout..', level)
            self.statusBar().showMessage('Restoring layout..', 2000)
            </DeepExtract>
            geometry = self._ctrl.state.retrieve('default/geometry')
            window = self._ctrl.state.retrieve('default/windowState')
            self.restoreGeometry(geometry)
            self.restoreState(window)
        return
    key = argument['name']
    value = argument.read()
    <DeepExtract>
    self._docks['console'].append('Storing %s = %s' % (key, value), level)
    self.statusBar().showMessage('Storing %s = %s' % (key, value), 2000)
    </DeepExtract>
    self._ctrl.state.store(argument['name'], argument.read())
    if key == 'showAdvancedControls':
        <DeepExtract>
        shown = bool(self._ctrl.state.retrieve('showAdvancedControls'))
        self._widgets['fullCommand'].setVisible(shown)
        toggles = self._widgets['dockToggles'].layout()
        for index in range(toggles.count()):
            item = toggles.itemAt(index)
            widget = item.widget()
            dock = widget.dock
            visible = not dock.advanced or shown
            widget.setVisible(visible)
            if not visible:
                dock.hide()
        </DeepExtract>
    if key in ('showAllApps', 'showHiddenApps', 'patchWithFilter'):
        self._ctrl.reset()
    if key == 'showAllVersions':
        self._ctrl.select_application(self._ctrl.state['appRequest'])
    if key == 'exclusionFilter':
        allzparkconfig.exclude_filter = value
        self._ctrl.reset()
    if key == 'theme':
        user_css = self._ctrl.state.retrieve('userCss', '')
        self._originalcss = res.load_theme(value)
        self.setStyleSheet('\n'.join([self._originalcss, res.format_stylesheet(user_css)]))
def on_setting_changed(self, argument):
    if isinstance(argument, qargparse.Button):
        if argument['name'] == 'resetLayout':
            self._docks['console'].append('Restoring layout..', level)
            self.statusBar().showMessage('Restoring layout..', 2000)
            geometry = self._ctrl.state.retrieve('default/geometry')
            window = self._ctrl.state.retrieve('default/windowState')
            self.restoreGeometry(geometry)
            self.restoreState(window)
        return
    key = argument['name']
    value = argument.read()
    self._docks['console'].append('Storing %s = %s' % (key, value), level)
    self.statusBar().showMessage('Storing %s = %s' % (key, value), 2000)
    self._ctrl.state.store(argument['name'], argument.read())
    if key == 'showAdvancedControls':
        shown = bool(self._ctrl.state.retrieve('showAdvancedControls'))
        self._widgets['fullCommand'].setVisible(shown)
        toggles = self._widgets['dockToggles'].layout()
        for index in range(toggles.count()):
            item = toggles.itemAt(index)
            widget = item.widget()
            dock = widget.dock
            visible = not dock.advanced or shown
            widget.setVisible(visible)
            if not visible:
                dock.hide()
    if key in ('showAllApps', 'showHiddenApps', 'patchWithFilter'):
        self._ctrl.reset()
    if key == 'showAllVersions':
        self._ctrl.select_application(self._ctrl.state['appRequest'])
    if key == 'exclusionFilter':
        allzparkconfig.exclude_filter = value
        self._ctrl.reset()
    if key == 'theme':
        user_css = self._ctrl.state.retrieve('userCss', '')
        self._originalcss = res.load_theme(value)
        self.setStyleSheet('\n'.join([self._originalcss, res.format_stylesheet(user_css)]))
allzpark
positive
def main(argv):
    logging.getLogger().setLevel(logging.INFO)
    FLAGS(argv)
    logging.info('Listing images...')
    images = glob.glob(os.path.join(FLAGS.root, '*', '*.' + FLAGS.extension))
    random.shuffle(images)
    logging.info('A total of %d images', len(images))
    labels = list(set((os.path.dirname(image) for image in images)))
    labels.sort()
    logging.info('A total of %d identical classes', len(labels))
    label2id = dict(((n, i) for (i, n) in enumerate(labels)))
    image_writer = puff.PuffStreamedWriter(FLAGS.output)
    labels = []
    current = 0
    my_timer = Timer()
    for filename in images:
        <DeepExtract>
        img = io.imread(filename)
        if img.ndim == 2:
            img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
        if FLAGS.size == 0:
            image_cropped = img
        if img.shape[0] < img.shape[1]:
            newshape = (FLAGS.size, int(img.shape[1] * float(FLAGS.size) / img.shape[0] + 0.5))
        else:
            newshape = (int(img.shape[0] * float(FLAGS.size) / img.shape[1] + 0.5), FLAGS.size)
        if img.shape[2] == 4:
            img = img[:, :, :3]
        img = transform.resize(img, newshape)
        if img.shape[0] > FLAGS.size:
            offset = int(img.shape[0] - FLAGS.size)
            image_cropped = (img[offset:offset + FLAGS.size] * 255).astype(np.uint8)
        else:
            offset = int(img.shape[1] - FLAGS.size)
            image_cropped = (img[:, offset:offset + FLAGS.size] * 255).astype(np.uint8)
        </DeepExtract>
        labels.append(label2id[os.path.dirname(filename)])
        image_writer.write_single(image_cropped)
        current += 1
        if current % 1000 == 0:
            logging.info('Processed %d images, elapsed %s', current, my_timer.lap())
    image_writer.finish()
    puff.write_puff(np.array(labels, dtype=np.int), FLAGS.output_label)
    logging.info('Done.')
def main(argv):
    logging.getLogger().setLevel(logging.INFO)
    FLAGS(argv)
    logging.info('Listing images...')
    images = glob.glob(os.path.join(FLAGS.root, '*', '*.' + FLAGS.extension))
    random.shuffle(images)
    logging.info('A total of %d images', len(images))
    labels = list(set((os.path.dirname(image) for image in images)))
    labels.sort()
    logging.info('A total of %d identical classes', len(labels))
    label2id = dict(((n, i) for (i, n) in enumerate(labels)))
    image_writer = puff.PuffStreamedWriter(FLAGS.output)
    labels = []
    current = 0
    my_timer = Timer()
    for filename in images:
        img = io.imread(filename)
        if img.ndim == 2:
            img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
        if FLAGS.size == 0:
            image_cropped = img
        if img.shape[0] < img.shape[1]:
            newshape = (FLAGS.size, int(img.shape[1] * float(FLAGS.size) / img.shape[0] + 0.5))
        else:
            newshape = (int(img.shape[0] * float(FLAGS.size) / img.shape[1] + 0.5), FLAGS.size)
        if img.shape[2] == 4:
            img = img[:, :, :3]
        img = transform.resize(img, newshape)
        if img.shape[0] > FLAGS.size:
            offset = int(img.shape[0] - FLAGS.size)
            image_cropped = (img[offset:offset + FLAGS.size] * 255).astype(np.uint8)
        else:
            offset = int(img.shape[1] - FLAGS.size)
            image_cropped = (img[:, offset:offset + FLAGS.size] * 255).astype(np.uint8)
        labels.append(label2id[os.path.dirname(filename)])
        image_writer.write_single(image_cropped)
        current += 1
        if current % 1000 == 0:
            logging.info('Processed %d images, elapsed %s', current, my_timer.lap())
    image_writer.finish()
    puff.write_puff(np.array(labels, dtype=np.int), FLAGS.output_label)
    logging.info('Done.')
decaf
positive
@retry(stop_max_attempt_number=3, wait_fixed=200)
def status(self):
    """
    Get current status.
    """
    <DeepExtract>
    res = super(SPM2Conn, self).read_block(REG_STATUS, 5)
    if not res:
        log.warn('Empty response received when reading register {:}'.format(REG_STATUS))
        raise Exception('Empty response received when reading register {:}'.format(REG_STATUS))
    if res == [r for r in res if r == 255]:
        log.warn('Received seemingly invalid response when reading register {:}: {:}'.format(REG_STATUS, res))
        raise Exception('Received seemingly invalid response when reading register {:}: {:}'.format(REG_STATUS, res))
    res = res
    </DeepExtract>
    ret = {'current_state': STATES.get(res[0], 'invalid'), 'last_state': {'up': STATES.get(res[1], 'invalid'), 'down': STATES.get(res[2], 'invalid')}, 'last_trigger': {'up': TRIGGERS.get(res[3], 'invalid'), 'down': TRIGGERS.get(res[4], 'invalid')}}
    return ret
@retry(stop_max_attempt_number=3, wait_fixed=200)
def status(self):
    """
    Get current status.
    """
    res = super(SPM2Conn, self).read_block(REG_STATUS, 5)
    if not res:
        log.warn('Empty response received when reading register {:}'.format(REG_STATUS))
        raise Exception('Empty response received when reading register {:}'.format(REG_STATUS))
    if res == [r for r in res if r == 255]:
        log.warn('Received seemingly invalid response when reading register {:}: {:}'.format(REG_STATUS, res))
        raise Exception('Received seemingly invalid response when reading register {:}: {:}'.format(REG_STATUS, res))
    res = res
    ret = {'current_state': STATES.get(res[0], 'invalid'), 'last_state': {'up': STATES.get(res[1], 'invalid'), 'down': STATES.get(res[2], 'invalid')}, 'last_trigger': {'up': TRIGGERS.get(res[3], 'invalid'), 'down': TRIGGERS.get(res[4], 'invalid')}}
    return ret
autopi-core
positive
def get_boolean(self, name):
    <DeepExtract>
    if name not in self.json_object:
        raise BinanceApiException(BinanceApiException.RUNTIME_ERROR, '[Json] Get json item field: ' + name + ' does not exist')
    </DeepExtract>
    return bool(self.json_object[name])
def get_boolean(self, name):
    if name not in self.json_object:
        raise BinanceApiException(BinanceApiException.RUNTIME_ERROR, '[Json] Get json item field: ' + name + ' does not exist')
    return bool(self.json_object[name])
Binance_Futures_python
positive
def suma_download(module, suma_params): """ Dowload (or preview) action suma_params['action'] should be set to either 'preview' or 'download'. arguments: module (dict): The Ansible module suma_params (dict): parameters to build the suma command note: Exits with fail_json in case of error """ targets_list = suma_params['targets'] req_oslevel = suma_params['req_oslevel'] if not targets_list: if req_oslevel == 'Latest': msg = 'Oslevel target could not be empty or equal "Latest" when target machine list is empty' module.log(msg) results['msg'] = msg module.fail_json(**results) elif re.match('^([0-9]{4}-[0-9]{2})(-00|-00-0000)$', req_oslevel): msg = 'When no Service Pack is provided, a target machine list is required' module.log(msg) results['msg'] = msg module.fail_json(**results) elif re.match('^([0-9]{4})(|-00|-00-00|-00-00-0000)$', req_oslevel): msg = 'Specify a non 0 value for the Technical Level or the Service Pack' module.log(msg) results['msg'] = msg module.fail_json(**results) <DeepExtract> lpp_source_list = {} cmd = ['lsnim', '-t', 'lpp_source', '-l'] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Cannot get the list of lpp source, command '{0}' failed with return code {1}".format(cmd, rc) module.log(msg) results['msg'] = msg results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr module.fail_json(**results) for line in stdout.rstrip().split('\n'): match_key = re.match('^(\\S+):', line) if match_key: obj_key = match_key.group(1) else: match_loc = re.match('^\\s+location\\s+=\\s+(\\S+)$', line) if match_loc: loc = match_loc.group(1) lpp_source_list[obj_key] = loc nim_lpp_sources = lpp_source_list </DeepExtract> module.debug('lpp source list: {0}'.format(nim_lpp_sources)) <DeepExtract> clients_list = [] cmd = ['lsnim', '-t', 'standalone'] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = "Command '{0}' failed with return code {1}.".format(' '.join(cmd), rc) module.fail_json(**results) for line in stdout.rstrip().splitlines(): clients_list.append(line.strip().split()[0]) nim_clients = clients_list </DeepExtract> nim_clients.append('master') module.debug('NIM Clients: {0}'.format(nim_clients)) <DeepExtract> clients = [] if len(targets_list) == 0: target_clients = clients for target in targets_list: rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in nim_clients: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in nim_clients: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = nim_clients continue if target in nim_clients or target == 'master': clients.append(target) target_clients = list(set(clients)) </DeepExtract> results['targets'] = target_clients if targets_list and (not target_clients): msg = "No matching NIM client found for target '{0}'.".format(suma_params['targets']) module.log(msg) results['msg'] = msg module.fail_json(**results) module.debug('Target list: {0}'.format(target_clients)) <DeepExtract> threads = [] oslevels = {} for machine in target_clients: process = threading.Thread(target=run_oslevel_cmd, args=(module, machine, oslevels)) process.start() threads.append(process) for process in threads: 
process.join(300) if process.is_alive(): module.log('[WARNING] {0} Not responding'.format(process)) clients_oslevel = oslevels </DeepExtract> module.debug('client oslevel dict: {0}'.format(clients_oslevel)) removed_oslevel = [] for key in [k for (k, v) in clients_oslevel.items() if not v]: removed_oslevel.append(key) del clients_oslevel[key] if targets_list and (not clients_oslevel): msg = 'Cannot retrieve oslevel for any NIM client of the target list' module.log(msg) results['msg'] = msg module.fail_json(**results) module.debug('oslevel cleaned dict: {0}'.format(clients_oslevel)) if removed_oslevel: msg = 'Unavailable client: {0}'.format(removed_oslevel) module.log('[WARNING] ' + msg) results['meta']['messages'].append(msg) <DeepExtract> if suma_params['req_oslevel'] is None or not suma_params['req_oslevel'].strip() or suma_params['req_oslevel'] == 'Latest': rq_type = 'Latest' if re.match('^([0-9]{4}-[0-9]{2})$', suma_params['req_oslevel']) and (not targets_list): rq_type = 'Latest' if re.match('^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$', suma_params['req_oslevel']): rq_type = 'TL' if re.match('^([0-9]{4}-[0-9]{2}-[0-9]{2})(|-[0-9]{4})$', suma_params['req_oslevel']): rq_type = 'SP' rq_type = 'ERROR' </DeepExtract> if rq_type == 'ERROR': msg = "Invalid oslevel: '{0}'".format(suma_params['req_oslevel']) module.log(msg) results['msg'] = msg module.fail_json(**results) suma_params['RqType'] = rq_type module.debug('SUMA req Type: {0}'.format(rq_type)) <DeepExtract> rq_name = '' if rq_type == 'Latest': if not clients_oslevel: if suma_params['req_oslevel'] == 'Latest': msg = 'Cannot get oslevel from targets, check you can get the oslevel on targets' module.log(msg) results['msg'] = msg module.fail_json(**results) metadata_filter_ml = suma_params['req_oslevel'][:7] if len(metadata_filter_ml) == 4: metadata_filter_ml += '-00' else: tl_max = re.match('^([0-9]{4}-[0-9]{2})(|-[0-9]{2}|-[0-9]{2}-[0-9]{4})$', max_oslevel(clients_oslevel)).group(1) tl_min = re.match('^([0-9]{4}-[0-9]{2})(|-[0-9]{2}|-[0-9]{2}-[0-9]{4})$', min_oslevel(clients_oslevel)).group(1) if re.match('^([0-9]{4})', tl_min).group(1) != re.match('^([0-9]{4})', tl_max).group(1): module.log('[WARNING] release level mismatch, only AIX {0} SP/TL will be downloaded\n\n'.format(tl_max[:2])) metadata_filter_ml = tl_max if not metadata_filter_ml: msg = 'Cannot build minimum level filter based on the OS level of targets' module.log(msg) results['msg'] = msg module.fail_json(**results) if not os.path.exists(suma_params['metadata_dir']): os.makedirs(suma_params['metadata_dir']) cmd = ['/usr/sbin/suma', '-x', '-a', 'Action=Metadata', '-a', 'RqType=Latest'] cmd += ['-a', 'DLTarget={0}'.format(suma_params['metadata_dir'])] cmd += ['-a', 'FilterML={0}'.format(metadata_filter_ml)] cmd += ['-a', 'DisplayName="{0}"'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['metadata_dir'])] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Suma metadata command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = msg module.fail_json(**results) module.debug("SUMA command '{0}' rc:{1}, stdout:{2}".format(' '.join(cmd), rc, stdout)) sp_version = None file_name = suma_params['metadata_dir'] + '/installp/ppc/' + metadata_filter_ml + '*.xml' module.debug('searched files: {0}'.format(file_name)) files = glob.glob(file_name) 
module.debug('searching SP in files: {0}'.format(files)) for cur_file in files: version = find_sp_version(module, cur_file) if sp_version is None or version > sp_version: sp_version = version rq_name = sp_version shutil.rmtree(suma_params['metadata_dir']) elif rq_type == 'TL': rq_name = re.match('^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$', suma_params['req_oslevel']).group(1) elif rq_type == 'SP': if re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{4}$', suma_params['req_oslevel']): rq_name = suma_params['req_oslevel'] elif re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', suma_params['req_oslevel']): metadata_filter_ml = re.match('^([0-9]{4}-[0-9]{2})-[0-9]{2}$', suma_params['req_oslevel']).group(1) if not os.path.exists(suma_params['metadata_dir']): os.makedirs(suma_params['metadata_dir']) cmd = ['/usr/sbin/suma', '-x', '-a', 'Action=Metadata', '-a', 'RqType=Latest'] cmd += ['-a', 'DLTarget={0}'.format(suma_params['metadata_dir'])] cmd += ['-a', 'FilterML={0}'.format(metadata_filter_ml)] cmd += ['-a', 'DisplayName="{0}"'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['metadata_dir'])] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Suma metadata command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = msg module.fail_json(**results) module.debug("SUMA command '{0}' rc:{1}, stdout:{2}".format(' '.join(cmd), rc, stdout)) sp_version = None cur_file = suma_params['metadata_dir'] + '/installp/ppc/' + suma_params['req_oslevel'] + '.xml' sp_version = find_sp_version(module, cur_file) rq_name = sp_version shutil.rmtree(suma_params['metadata_dir']) if not rq_name or not rq_name.strip(): msg = 'OS level {0} does not match any fixes'.format(suma_params['req_oslevel']) module.log(msg) results['msg'] = msg module.fail_json(**results) rq_name = rq_name </DeepExtract> suma_params['RqName'] = rq_name module.debug('Suma req Name: {0}'.format(rq_name)) <DeepExtract> minimum_oslevel = None filter_ml = None if not clients_oslevel: filter_ml = rq_name[:7] if len(filter_ml) == 4: filter_ml += '-00' else: for (key, value) in iter(clients_oslevel.items()): if re.match('^([0-9]{4})', value).group(1) == rq_name[:4] and re.match('^([0-9]{4}-[0-9]{2}-[0-9]{2})', value).group(1) < rq_name[:10] and (minimum_oslevel is None or value < minimum_oslevel): minimum_oslevel = value if minimum_oslevel is not None: filter_ml = minimum_oslevel[:7] filter_ml = filter_ml </DeepExtract> suma_params['FilterMl'] = filter_ml module.debug('SUMA req filter min Oslevel: {0}'.format(filter_ml)) if filter_ml is None: msg = 'There is no target machine matching the requested oslevel {0}.'.format(rq_name[:10]) module.log(msg) results['msg'] = msg module.fail_json(**results) <DeepExtract> lpp_src = '' oslevel = rq_name if suma_params['lpp_source_name'] and suma_params['lpp_source_name'].strip(): lpp_src = suma_params['lpp_source_name'] else: if re.match('^([0-9]{4}-[0-9]{2})$', oslevel): oslevel = oslevel + '-00-0000' lpp_src = '{0}-lpp_source'.format(oslevel) suma_params['lpp_source_name'] = lpp_src </DeepExtract> suma_params['LppSource'] = lpp_source module.debug('Lpp source name: {0}'.format(lpp_source)) <DeepExtract> if suma_params['download_dir']: dl_target = '{0}/{1}'.format(suma_params['download_dir'], lpp_source) if lpp_source in nim_lpp_sources and nim_lpp_sources[lpp_source] != dl_target: msg = "lpp source location 
mismatch. A lpp source '{0}' already exists with a location different from '{1}'".format(lpp_source, dl_target) module.log(msg) results['msg'] = msg module.fail_json(**results) elif lpp_source in nim_lpp_sources: dl_target = nim_lpp_sources[lpp_source] else: dl_target = '/usr/sys/inst.images' dl_target = dl_target </DeepExtract> suma_params['DLTarget'] = dl_target module.debug('DL target: {0}'.format(dl_target)) results['meta']['messages'].append('lpp_source will be: {0}.'.format(lpp_source)) results['meta']['messages'].append('lpp_source location will be: {0}.'.format(dl_target)) results['meta']['messages'].append('lpp_source will be available to update machines from {0}-00 to {1}.'.format(filter_ml, rq_name)) if rq_type == 'Latest': msg = 'The latest SP of {0} is: {1}'.format(filter_ml, rq_name) module.log(msg) results['meta']['messages'].append(msg) suma_params['comments'] = '"Updates from {0} to {1}, built by Ansible Aix Automate infrastructure updates tools"'.format(filter_ml, rq_name) if not os.path.exists(dl_target): os.makedirs(dl_target) <DeepExtract> rq_type = suma_params['RqType'] if rq_type == 'Latest': rq_type = 'SP' cmd = ['/usr/sbin/suma', '-x'] cmd += ['-a', 'RqType={0}'.format(rq_type)] cmd += ['-a', 'Action={0}'.format('Preview')] cmd += ['-a', 'FilterML={0}'.format(suma_params['FilterMl'])] cmd += ['-a', 'DLTarget={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'RqName={0}'.format(suma_params['RqName'])] cmd += ['-a', 'DisplayName={0}'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['DLTarget'])] if suma_params['extend_fs']: cmd += ['-a', 'Extend=y'] else: cmd += ['-a', 'Extend=n'] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr if rc != 0: msg = "Suma {0} command '{1}' failed with return code {2}".format('Preview', ' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) stdout = stdout </DeepExtract> module.debug('SUMA preview stdout:{0}'.format(stdout)) downloaded = 0 failed = 0 skipped = 0 for line in stdout.rstrip().splitlines(): line = line.rstrip() matched = re.match('^\\s+(\\d+)\\s+downloaded$', line) if matched: downloaded = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+failed$', line) if matched: failed = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+skipped$', line) if matched: skipped = int(matched.group(1)) msg = 'Preview summary : {0} to download, {1} failed, {2} skipped'.format(downloaded, failed, skipped) module.log(msg) if suma_params['action'] == 'preview': results['meta']['messages'].append(msg) return if downloaded == 0 and skipped == 0: return results['meta']['messages'].extend(stdout.rstrip().splitlines()) results['meta']['messages'].append(msg) if downloaded != 0: <DeepExtract> rq_type = suma_params['RqType'] if rq_type == 'Latest': rq_type = 'SP' cmd = ['/usr/sbin/suma', '-x'] cmd += ['-a', 'RqType={0}'.format(rq_type)] cmd += ['-a', 'Action={0}'.format('Download')] cmd += ['-a', 'FilterML={0}'.format(suma_params['FilterMl'])] cmd += ['-a', 'DLTarget={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'RqName={0}'.format(suma_params['RqName'])] cmd += ['-a', 'DisplayName={0}'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['DLTarget'])] if suma_params['extend_fs']: cmd += ['-a', 'Extend=y'] else: cmd += ['-a', 'Extend=n'] (rc, stdout, stderr) = module.run_command(cmd) 
results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr if rc != 0: msg = "Suma {0} command '{1}' failed with return code {2}".format('Download', ' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) stdout = stdout </DeepExtract> module.debug('SUMA dowload stdout:{0}'.format(stdout)) downloaded = 0 failed = 0 skipped = 0 for line in stdout.rstrip().splitlines(): line = line.rstrip() matched = re.match('^\\s+(\\d+)\\s+downloaded$', line) if matched: downloaded = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+failed$', line) if matched: failed = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+skipped$', line) if matched: skipped = int(matched.group(1)) msg = 'Download summary : {0} downloaded, {1} failed, {2} skipped'.format(downloaded, failed, skipped) if downloaded == 0 and skipped == 0: module.log(msg) results['meta']['messages'].append(msg) return module.log(msg) results['meta']['messages'].extend(stdout.rstrip().splitlines()) results['meta']['messages'].append(msg) if downloaded != 0: results['changed'] = True if not suma_params['download_only'] and lpp_source not in nim_lpp_sources: cmd = ['/usr/sbin/nim', '-o', 'define', '-t', 'lpp_source', '-a', 'server=master'] cmd += ['-a', 'location={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'packages=all'] cmd += ['-a', 'comments={0}'.format(suma_params['comments'])] cmd += ['{0}'.format(suma_params['LppSource'])] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr if rc != 0: msg = "NIM command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr:{0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) results['changed'] = True
def suma_download(module, suma_params): """ Dowload (or preview) action suma_params['action'] should be set to either 'preview' or 'download'. arguments: module (dict): The Ansible module suma_params (dict): parameters to build the suma command note: Exits with fail_json in case of error """ targets_list = suma_params['targets'] req_oslevel = suma_params['req_oslevel'] if not targets_list: if req_oslevel == 'Latest': msg = 'Oslevel target could not be empty or equal "Latest" when target machine list is empty' module.log(msg) results['msg'] = msg module.fail_json(**results) elif re.match('^([0-9]{4}-[0-9]{2})(-00|-00-0000)$', req_oslevel): msg = 'When no Service Pack is provided, a target machine list is required' module.log(msg) results['msg'] = msg module.fail_json(**results) elif re.match('^([0-9]{4})(|-00|-00-00|-00-00-0000)$', req_oslevel): msg = 'Specify a non 0 value for the Technical Level or the Service Pack' module.log(msg) results['msg'] = msg module.fail_json(**results) lpp_source_list = {} cmd = ['lsnim', '-t', 'lpp_source', '-l'] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Cannot get the list of lpp source, command '{0}' failed with return code {1}".format(cmd, rc) module.log(msg) results['msg'] = msg results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr module.fail_json(**results) for line in stdout.rstrip().split('\n'): match_key = re.match('^(\\S+):', line) if match_key: obj_key = match_key.group(1) else: match_loc = re.match('^\\s+location\\s+=\\s+(\\S+)$', line) if match_loc: loc = match_loc.group(1) lpp_source_list[obj_key] = loc nim_lpp_sources = lpp_source_list module.debug('lpp source list: {0}'.format(nim_lpp_sources)) clients_list = [] cmd = ['lsnim', '-t', 'standalone'] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = "Command '{0}' failed with return code {1}.".format(' '.join(cmd), rc) module.fail_json(**results) for line in stdout.rstrip().splitlines(): clients_list.append(line.strip().split()[0]) nim_clients = clients_list nim_clients.append('master') module.debug('NIM Clients: {0}'.format(nim_clients)) clients = [] if len(targets_list) == 0: target_clients = clients for target in targets_list: rmatch = re.match('(\\w+)\\[(\\d+):(\\d+)\\]', target) if rmatch: name = rmatch.group(1) start = rmatch.group(2) end = rmatch.group(3) for i in range(int(start), int(end) + 1): curr_name = name + str(i) if curr_name in nim_clients: clients.append(curr_name) continue rmatch = re.match('(\\w+)\\*$', target) if rmatch: name = rmatch.group(1) for curr_name in nim_clients: if re.match('^%s\\.*' % name, curr_name): clients.append(curr_name) continue if target.upper() == 'ALL' or target == '*': clients = nim_clients continue if target in nim_clients or target == 'master': clients.append(target) target_clients = list(set(clients)) results['targets'] = target_clients if targets_list and (not target_clients): msg = "No matching NIM client found for target '{0}'.".format(suma_params['targets']) module.log(msg) results['msg'] = msg module.fail_json(**results) module.debug('Target list: {0}'.format(target_clients)) threads = [] oslevels = {} for machine in target_clients: process = threading.Thread(target=run_oslevel_cmd, args=(module, machine, oslevels)) process.start() threads.append(process) for process in threads: process.join(300) if process.is_alive(): module.log('[WARNING] {0} Not responding'.format(process)) 
clients_oslevel = oslevels module.debug('client oslevel dict: {0}'.format(clients_oslevel)) removed_oslevel = [] for key in [k for (k, v) in clients_oslevel.items() if not v]: removed_oslevel.append(key) del clients_oslevel[key] if targets_list and (not clients_oslevel): msg = 'Cannot retrieve oslevel for any NIM client of the target list' module.log(msg) results['msg'] = msg module.fail_json(**results) module.debug('oslevel cleaned dict: {0}'.format(clients_oslevel)) if removed_oslevel: msg = 'Unavailable client: {0}'.format(removed_oslevel) module.log('[WARNING] ' + msg) results['meta']['messages'].append(msg) if suma_params['req_oslevel'] is None or not suma_params['req_oslevel'].strip() or suma_params['req_oslevel'] == 'Latest': rq_type = 'Latest' if re.match('^([0-9]{4}-[0-9]{2})$', suma_params['req_oslevel']) and (not targets_list): rq_type = 'Latest' if re.match('^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$', suma_params['req_oslevel']): rq_type = 'TL' if re.match('^([0-9]{4}-[0-9]{2}-[0-9]{2})(|-[0-9]{4})$', suma_params['req_oslevel']): rq_type = 'SP' rq_type = 'ERROR' if rq_type == 'ERROR': msg = "Invalid oslevel: '{0}'".format(suma_params['req_oslevel']) module.log(msg) results['msg'] = msg module.fail_json(**results) suma_params['RqType'] = rq_type module.debug('SUMA req Type: {0}'.format(rq_type)) rq_name = '' if rq_type == 'Latest': if not clients_oslevel: if suma_params['req_oslevel'] == 'Latest': msg = 'Cannot get oslevel from targets, check you can get the oslevel on targets' module.log(msg) results['msg'] = msg module.fail_json(**results) metadata_filter_ml = suma_params['req_oslevel'][:7] if len(metadata_filter_ml) == 4: metadata_filter_ml += '-00' else: tl_max = re.match('^([0-9]{4}-[0-9]{2})(|-[0-9]{2}|-[0-9]{2}-[0-9]{4})$', max_oslevel(clients_oslevel)).group(1) tl_min = re.match('^([0-9]{4}-[0-9]{2})(|-[0-9]{2}|-[0-9]{2}-[0-9]{4})$', min_oslevel(clients_oslevel)).group(1) if re.match('^([0-9]{4})', tl_min).group(1) != re.match('^([0-9]{4})', tl_max).group(1): module.log('[WARNING] release level mismatch, only AIX {0} SP/TL will be downloaded\n\n'.format(tl_max[:2])) metadata_filter_ml = tl_max if not metadata_filter_ml: msg = 'Cannot build minimum level filter based on the OS level of targets' module.log(msg) results['msg'] = msg module.fail_json(**results) if not os.path.exists(suma_params['metadata_dir']): os.makedirs(suma_params['metadata_dir']) cmd = ['/usr/sbin/suma', '-x', '-a', 'Action=Metadata', '-a', 'RqType=Latest'] cmd += ['-a', 'DLTarget={0}'.format(suma_params['metadata_dir'])] cmd += ['-a', 'FilterML={0}'.format(metadata_filter_ml)] cmd += ['-a', 'DisplayName="{0}"'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['metadata_dir'])] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Suma metadata command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = msg module.fail_json(**results) module.debug("SUMA command '{0}' rc:{1}, stdout:{2}".format(' '.join(cmd), rc, stdout)) sp_version = None file_name = suma_params['metadata_dir'] + '/installp/ppc/' + metadata_filter_ml + '*.xml' module.debug('searched files: {0}'.format(file_name)) files = glob.glob(file_name) module.debug('searching SP in files: {0}'.format(files)) for cur_file in files: version = find_sp_version(module, cur_file) if sp_version is None or version > sp_version: 
sp_version = version rq_name = sp_version shutil.rmtree(suma_params['metadata_dir']) elif rq_type == 'TL': rq_name = re.match('^([0-9]{4}-[0-9]{2})(|-00|-00-0000)$', suma_params['req_oslevel']).group(1) elif rq_type == 'SP': if re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{4}$', suma_params['req_oslevel']): rq_name = suma_params['req_oslevel'] elif re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', suma_params['req_oslevel']): metadata_filter_ml = re.match('^([0-9]{4}-[0-9]{2})-[0-9]{2}$', suma_params['req_oslevel']).group(1) if not os.path.exists(suma_params['metadata_dir']): os.makedirs(suma_params['metadata_dir']) cmd = ['/usr/sbin/suma', '-x', '-a', 'Action=Metadata', '-a', 'RqType=Latest'] cmd += ['-a', 'DLTarget={0}'.format(suma_params['metadata_dir'])] cmd += ['-a', 'FilterML={0}'.format(metadata_filter_ml)] cmd += ['-a', 'DisplayName="{0}"'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['metadata_dir'])] (rc, stdout, stderr) = module.run_command(cmd) if rc != 0: msg = "Suma metadata command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr results['msg'] = msg module.fail_json(**results) module.debug("SUMA command '{0}' rc:{1}, stdout:{2}".format(' '.join(cmd), rc, stdout)) sp_version = None cur_file = suma_params['metadata_dir'] + '/installp/ppc/' + suma_params['req_oslevel'] + '.xml' sp_version = find_sp_version(module, cur_file) rq_name = sp_version shutil.rmtree(suma_params['metadata_dir']) if not rq_name or not rq_name.strip(): msg = 'OS level {0} does not match any fixes'.format(suma_params['req_oslevel']) module.log(msg) results['msg'] = msg module.fail_json(**results) rq_name = rq_name suma_params['RqName'] = rq_name module.debug('Suma req Name: {0}'.format(rq_name)) minimum_oslevel = None filter_ml = None if not clients_oslevel: filter_ml = rq_name[:7] if len(filter_ml) == 4: filter_ml += '-00' else: for (key, value) in iter(clients_oslevel.items()): if re.match('^([0-9]{4})', value).group(1) == rq_name[:4] and re.match('^([0-9]{4}-[0-9]{2}-[0-9]{2})', value).group(1) < rq_name[:10] and (minimum_oslevel is None or value < minimum_oslevel): minimum_oslevel = value if minimum_oslevel is not None: filter_ml = minimum_oslevel[:7] filter_ml = filter_ml suma_params['FilterMl'] = filter_ml module.debug('SUMA req filter min Oslevel: {0}'.format(filter_ml)) if filter_ml is None: msg = 'There is no target machine matching the requested oslevel {0}.'.format(rq_name[:10]) module.log(msg) results['msg'] = msg module.fail_json(**results) lpp_src = '' oslevel = rq_name if suma_params['lpp_source_name'] and suma_params['lpp_source_name'].strip(): lpp_src = suma_params['lpp_source_name'] else: if re.match('^([0-9]{4}-[0-9]{2})$', oslevel): oslevel = oslevel + '-00-0000' lpp_src = '{0}-lpp_source'.format(oslevel) suma_params['lpp_source_name'] = lpp_src suma_params['LppSource'] = lpp_source module.debug('Lpp source name: {0}'.format(lpp_source)) if suma_params['download_dir']: dl_target = '{0}/{1}'.format(suma_params['download_dir'], lpp_source) if lpp_source in nim_lpp_sources and nim_lpp_sources[lpp_source] != dl_target: msg = "lpp source location mismatch. 
A lpp source '{0}' already exists with a location different from '{1}'".format(lpp_source, dl_target) module.log(msg) results['msg'] = msg module.fail_json(**results) elif lpp_source in nim_lpp_sources: dl_target = nim_lpp_sources[lpp_source] else: dl_target = '/usr/sys/inst.images' dl_target = dl_target suma_params['DLTarget'] = dl_target module.debug('DL target: {0}'.format(dl_target)) results['meta']['messages'].append('lpp_source will be: {0}.'.format(lpp_source)) results['meta']['messages'].append('lpp_source location will be: {0}.'.format(dl_target)) results['meta']['messages'].append('lpp_source will be available to update machines from {0}-00 to {1}.'.format(filter_ml, rq_name)) if rq_type == 'Latest': msg = 'The latest SP of {0} is: {1}'.format(filter_ml, rq_name) module.log(msg) results['meta']['messages'].append(msg) suma_params['comments'] = '"Updates from {0} to {1}, built by Ansible Aix Automate infrastructure updates tools"'.format(filter_ml, rq_name) if not os.path.exists(dl_target): os.makedirs(dl_target) rq_type = suma_params['RqType'] if rq_type == 'Latest': rq_type = 'SP' cmd = ['/usr/sbin/suma', '-x'] cmd += ['-a', 'RqType={0}'.format(rq_type)] cmd += ['-a', 'Action={0}'.format('Preview')] cmd += ['-a', 'FilterML={0}'.format(suma_params['FilterMl'])] cmd += ['-a', 'DLTarget={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'RqName={0}'.format(suma_params['RqName'])] cmd += ['-a', 'DisplayName={0}'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['DLTarget'])] if suma_params['extend_fs']: cmd += ['-a', 'Extend=y'] else: cmd += ['-a', 'Extend=n'] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr if rc != 0: msg = "Suma {0} command '{1}' failed with return code {2}".format('Preview', ' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) stdout = stdout module.debug('SUMA preview stdout:{0}'.format(stdout)) downloaded = 0 failed = 0 skipped = 0 for line in stdout.rstrip().splitlines(): line = line.rstrip() matched = re.match('^\\s+(\\d+)\\s+downloaded$', line) if matched: downloaded = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+failed$', line) if matched: failed = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+skipped$', line) if matched: skipped = int(matched.group(1)) msg = 'Preview summary : {0} to download, {1} failed, {2} skipped'.format(downloaded, failed, skipped) module.log(msg) if suma_params['action'] == 'preview': results['meta']['messages'].append(msg) return if downloaded == 0 and skipped == 0: return results['meta']['messages'].extend(stdout.rstrip().splitlines()) results['meta']['messages'].append(msg) if downloaded != 0: rq_type = suma_params['RqType'] if rq_type == 'Latest': rq_type = 'SP' cmd = ['/usr/sbin/suma', '-x'] cmd += ['-a', 'RqType={0}'.format(rq_type)] cmd += ['-a', 'Action={0}'.format('Download')] cmd += ['-a', 'FilterML={0}'.format(suma_params['FilterMl'])] cmd += ['-a', 'DLTarget={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'RqName={0}'.format(suma_params['RqName'])] cmd += ['-a', 'DisplayName={0}'.format(suma_params['description'])] cmd += ['-a', 'FilterDir={0}'.format(suma_params['DLTarget'])] if suma_params['extend_fs']: cmd += ['-a', 'Extend=y'] else: cmd += ['-a', 'Extend=n'] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout 
results['stderr'] = stderr if rc != 0: msg = "Suma {0} command '{1}' failed with return code {2}".format('Download', ' '.join(cmd), rc) module.log(msg + ', stderr: {0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) stdout = stdout module.debug('SUMA dowload stdout:{0}'.format(stdout)) downloaded = 0 failed = 0 skipped = 0 for line in stdout.rstrip().splitlines(): line = line.rstrip() matched = re.match('^\\s+(\\d+)\\s+downloaded$', line) if matched: downloaded = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+failed$', line) if matched: failed = int(matched.group(1)) continue matched = re.match('^\\s+(\\d+)\\s+skipped$', line) if matched: skipped = int(matched.group(1)) msg = 'Download summary : {0} downloaded, {1} failed, {2} skipped'.format(downloaded, failed, skipped) if downloaded == 0 and skipped == 0: module.log(msg) results['meta']['messages'].append(msg) return module.log(msg) results['meta']['messages'].extend(stdout.rstrip().splitlines()) results['meta']['messages'].append(msg) if downloaded != 0: results['changed'] = True if not suma_params['download_only'] and lpp_source not in nim_lpp_sources: cmd = ['/usr/sbin/nim', '-o', 'define', '-t', 'lpp_source', '-a', 'server=master'] cmd += ['-a', 'location={0}'.format(suma_params['DLTarget'])] cmd += ['-a', 'packages=all'] cmd += ['-a', 'comments={0}'.format(suma_params['comments'])] cmd += ['{0}'.format(suma_params['LppSource'])] (rc, stdout, stderr) = module.run_command(cmd) results['cmd'] = ' '.join(cmd) results['stdout'] = stdout results['stderr'] = stderr if rc != 0: msg = "NIM command '{0}' failed with return code {1}".format(' '.join(cmd), rc) module.log(msg + ', stderr:{0}, stdout:{1}'.format(stderr, stdout)) results['msg'] = msg module.fail_json(**results) results['changed'] = True
ansible-power-aix
positive
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.0):
    np.random.seed(cfg.RNG_SEED)
    'Test a Fast R-CNN network on an image database.'
    num_images = len(imdb.image_index)
    all_boxes = [[[] for _ in range(num_images)] for _ in range(imdb.num_classes)]
    output_dir = get_output_dir(imdb, weights_filename)
    _t = {'im_detect': Timer(), 'misc': Timer()}
    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        <DeepExtract>
        (blobs, im_scales) = _get_blobs(im)
        assert len(im_scales) == 1, 'Only single-image batch implemented'
        im_blob = blobs['data']
        blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
        (_, scores, bbox_pred, rois) = net.test_image(sess, blobs['data'], blobs['im_info'])
        boxes = rois[:, 1:5] / im_scales[0]
        scores = np.reshape(scores, [scores.shape[0], -1])
        bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
        if cfg.TEST.BBOX_REG:
            box_deltas = bbox_pred
            pred_boxes = bbox_transform_inv(boxes, box_deltas)
            pred_boxes = _clip_boxes(pred_boxes, im.shape)
        else:
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))
        (scores, boxes) = (scores, pred_boxes)
        </DeepExtract>
        _t['im_detect'].toc()
        _t['misc'].tic()
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()
        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'.format(i + 1, num_images, _t['im_detect'].average_time, _t['misc'].average_time))
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
def test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.0):
    np.random.seed(cfg.RNG_SEED)
    'Test a Fast R-CNN network on an image database.'
    num_images = len(imdb.image_index)
    all_boxes = [[[] for _ in range(num_images)] for _ in range(imdb.num_classes)]
    output_dir = get_output_dir(imdb, weights_filename)
    _t = {'im_detect': Timer(), 'misc': Timer()}
    for i in range(num_images):
        im = cv2.imread(imdb.image_path_at(i))
        _t['im_detect'].tic()
        (blobs, im_scales) = _get_blobs(im)
        assert len(im_scales) == 1, 'Only single-image batch implemented'
        im_blob = blobs['data']
        blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)
        (_, scores, bbox_pred, rois) = net.test_image(sess, blobs['data'], blobs['im_info'])
        boxes = rois[:, 1:5] / im_scales[0]
        scores = np.reshape(scores, [scores.shape[0], -1])
        bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
        if cfg.TEST.BBOX_REG:
            box_deltas = bbox_pred
            pred_boxes = bbox_transform_inv(boxes, box_deltas)
            pred_boxes = _clip_boxes(pred_boxes, im.shape)
        else:
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))
        (scores, boxes) = (scores, pred_boxes)
        _t['im_detect'].toc()
        _t['misc'].tic()
        for j in range(1, imdb.num_classes):
            inds = np.where(scores[:, j] > thresh)[0]
            cls_scores = scores[inds, j]
            cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
            cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)
            keep = nms(cls_dets, cfg.TEST.NMS)
            cls_dets = cls_dets[keep, :]
            all_boxes[j][i] = cls_dets
        if max_per_image > 0:
            image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]
        _t['misc'].toc()
        print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s'.format(i + 1, num_images, _t['im_detect'].average_time, _t['misc'].average_time))
    det_file = os.path.join(output_dir, 'detections.pkl')
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
    print('Evaluating detections')
    imdb.evaluate_detections(all_boxes, output_dir)
centerNet-deep-sort
positive
def result(self):
    class_info = {}
    origin_counter = Counter([x[0] for x in self.origins])
    found_counter = Counter([x[0] for x in self.founds])
    right_counter = Counter([x[0] for x in self.rights])
    for (type_, count) in origin_counter.items():
        origin = count
        found = found_counter.get(type_, 0)
        right = right_counter.get(type_, 0)
        <DeepExtract>
        recall = 0 if origin == 0 else right / origin
        precision = 0 if found == 0 else right / found
        f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
        (recall, precision, f1) = (recall, precision, f1)
        </DeepExtract>
        class_info[type_] = {'acc': round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
    origin = len(self.origins)
    found = len(self.founds)
    right = len(self.rights)
    <DeepExtract>
    recall = 0 if origin == 0 else right / origin
    precision = 0 if found == 0 else right / found
    f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
    (recall, precision, f1) = (recall, precision, f1)
    </DeepExtract>
    return ({'acc': precision, 'recall': recall, 'f1': f1}, class_info)
def result(self):
    class_info = {}
    origin_counter = Counter([x[0] for x in self.origins])
    found_counter = Counter([x[0] for x in self.founds])
    right_counter = Counter([x[0] for x in self.rights])
    for (type_, count) in origin_counter.items():
        origin = count
        found = found_counter.get(type_, 0)
        right = right_counter.get(type_, 0)
        recall = 0 if origin == 0 else right / origin
        precision = 0 if found == 0 else right / found
        f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
        (recall, precision, f1) = (recall, precision, f1)
        class_info[type_] = {'acc': round(precision, 4), 'recall': round(recall, 4), 'f1': round(f1, 4)}
    origin = len(self.origins)
    found = len(self.founds)
    right = len(self.rights)
    recall = 0 if origin == 0 else right / origin
    precision = 0 if found == 0 else right / found
    f1 = 0.0 if recall + precision == 0 else 2 * precision * recall / (precision + recall)
    (recall, precision, f1) = (recall, precision, f1)
    return ({'acc': precision, 'recall': recall, 'f1': f1}, class_info)
BERT-Attribute-Value-Extract
positive
def Quote(value, separator, quote):
    <DeepExtract>
    if isinstance(value, str):
        value = value
    else:
        value = str(value)
    </DeepExtract>
    if len(value) > 1 and value[0] == quote and (value[-1] == quote):
        return value
    if separator in value or value == '':
        return quote + value + quote
    else:
        return value
def Quote(value, separator, quote):
    if isinstance(value, str):
        value = value
    else:
        value = str(value)
    if len(value) > 1 and value[0] == quote and (value[-1] == quote):
        return value
    if separator in value or value == '':
        return quote + value + quote
    else:
        return value
Beta
positive
def _get_rois_blob(im_rois, im_scale):
    """Converts RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns [level, x1, y1, x2, y2]
    """
    <DeepExtract>
    rois = im_rois.astype(np.float, copy=False) * im_scale
    levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
    (rois, levels) = (rois, levels)
    </DeepExtract>
    rois_blob = np.hstack((levels, rois))
    return rois_blob.astype(np.float32, copy=False)
def _get_rois_blob(im_rois, im_scale):
    """Converts RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns [level, x1, y1, x2, y2]
    """
    rois = im_rois.astype(np.float, copy=False) * im_scale
    levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
    (rois, levels) = (rois, levels)
    rois_blob = np.hstack((levels, rois))
    return rois_blob.astype(np.float32, copy=False)
DIoU-pytorch-detectron
positive
def _do_bulk_delete(self, filters: Dict[str, Any]) -> Union[int, None]:
    if not self._bulk_delete_enabled:
        raise NotImplementedError(f'The {self._model_class.__name__} API does not offer bulk deletes')
    <DeepExtract>
    d = {}
    d.update(filters)
    if ordering is not None:
        d.update(ordering=ordering)
    if limit is not None:
        d.update(limit=limit)
    if offset is not None:
        d.update(offset=offset)
    query_params = d
    </DeepExtract>
    response = self._client.bulk_delete(self._api_path, **query_params)
    if isinstance(response, int):
        return response
    return None
def _do_bulk_delete(self, filters: Dict[str, Any]) -> Union[int, None]:
    if not self._bulk_delete_enabled:
        raise NotImplementedError(f'The {self._model_class.__name__} API does not offer bulk deletes')
    d = {}
    d.update(filters)
    if ordering is not None:
        d.update(ordering=ordering)
    if limit is not None:
        d.update(limit=limit)
    if offset is not None:
        d.update(offset=offset)
    query_params = d
    response = self._client.bulk_delete(self._api_path, **query_params)
    if isinstance(response, int):
        return response
    return None
balsam
positive
def validate(self, response: FieldRadio) -> None:
    """Validate this FieldRadio instance.

    :param response: Instance to check and verify options are valid
    """
    <DeepExtract>
    validation = self.validator(choices=response.options)
    if validation.error_msg:
        self.valid = False
    else:
        self.valid = True
    validation = validation
    </DeepExtract>
    self.current_error = validation.error_msg
def validate(self, response: FieldRadio) -> None:
    """Validate this FieldRadio instance.

    :param response: Instance to check and verify options are valid
    """
    validation = self.validator(choices=response.options)
    if validation.error_msg:
        self.valid = False
    else:
        self.valid = True
    validation = validation
    self.current_error = validation.error_msg
ansible-navigator
positive
def container_from_config(original_layer_dict, custom_objects={}): layer_dict = copy.deepcopy(original_layer_dict) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: <DeepExtract> layer_dict = copy.deepcopy(layer) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) init_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) init_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) init_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] init_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') init_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) init_layer = base_layer </DeepExtract> layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) return merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: <DeepExtract> layer_dict = copy.deepcopy(layer) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) init_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') 
layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) init_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) init_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] init_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') init_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) init_layer = base_layer </DeepExtract> layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) return seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: <DeepExtract> layer_dict = copy.deepcopy(layer_dict['nodes'].get(node['name'])) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in 
layer_dict: kwargs[kwarg] = layer_dict[kwarg] layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) layer = base_layer </DeepExtract> node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) return graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] return AutoEncoder(**kwargs) elif name == 'TimeDistributed': <DeepExtract> layer_dict = copy.deepcopy(layer_dict.pop('layer')) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) child_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) child_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) child_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] child_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') child_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: 
layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) child_layer = base_layer </DeepExtract> if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') return TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') <DeepExtract> base_layer = get_from_module(name, globals(), 'layer', instantiate=True, kwargs=layer_dict) </DeepExtract> return base_layer
def container_from_config(original_layer_dict, custom_objects={}): layer_dict = copy.deepcopy(original_layer_dict) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: layer_dict = copy.deepcopy(layer) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) init_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) init_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) init_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] init_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') init_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) init_layer = base_layer layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) return merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: layer_dict = copy.deepcopy(layer) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) init_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: 
init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) init_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) init_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] init_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') init_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) init_layer = base_layer layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) return seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer_dict = copy.deepcopy(layer_dict['nodes'].get(node['name'])) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] layer = 
AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) layer = base_layer node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) return graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] return AutoEncoder(**kwargs) elif name == 'TimeDistributed': layer_dict = copy.deepcopy(layer_dict.pop('layer')) name = layer_dict.get('name') for cls_key in custom_objects: globals()[cls_key] = custom_objects[cls_key] if name == 'Merge': mode = layer_dict.get('mode') concat_axis = layer_dict.get('concat_axis') dot_axes = layer_dict.get('dot_axes') layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) merge_layer = Merge(layer_list, mode, concat_axis, dot_axes) child_layer = merge_layer elif name == 'Sequential': layers = layer_dict.get('layers') layer_list = [] for layer in layers: init_layer = container_from_config(layer) layer_list.append(init_layer) seq_layer = containers.Sequential(layer_list) child_layer = seq_layer elif name == 'Graph': graph_layer = containers.Graph() inputs = layer_dict.get('input_config') for input in inputs: graph_layer.add_input(**input) nodes = layer_dict.get('node_config') for node in nodes: layer = container_from_config(layer_dict['nodes'].get(node['name'])) node['layer'] = layer node['create_output'] = False graph_layer.add_node(**node) outputs = layer_dict.get('output_config') for output in outputs: graph_layer.add_output(**output) child_layer = graph_layer elif name == 'AutoEncoder': kwargs = {'encoder': container_from_config(layer_dict.get('encoder_config')), 'decoder': container_from_config(layer_dict.get('decoder_config'))} for kwarg in ['output_reconstruction', 'weights']: if kwarg in layer_dict: kwargs[kwarg] = layer_dict[kwarg] child_layer = AutoEncoder(**kwargs) elif name == 'TimeDistributed': child_layer = container_from_config(layer_dict.pop('layer')) if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') child_layer = TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in 
layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_layer(name, layer_dict) child_layer = base_layer if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') return TimeDistributed(child_layer, **layer_dict) else: layer_dict.pop('name') for (k, v) in layer_dict.items(): if isinstance(v, dict): vname = v.pop('name') if vname in [x for (x, y) in inspect.getmembers(constraints, predicate=inspect.isclass)]: layer_dict[k] = constraints.get(vname, v) elif vname in [x for (x, y) in inspect.getmembers(regularizers, predicate=inspect.isclass)]: layer_dict[k] = regularizers.get(vname, v) else: v['name'] = vname if 'custom_name' in layer_dict: layer_dict['name'] = layer_dict.pop('custom_name') base_layer = get_from_module(name, globals(), 'layer', instantiate=True, kwargs=layer_dict) return base_layer
encoder_decoder
positive
def test_execute_delete_params(tmp_path: pathlib.Path) -> None:
    """Test execute delete params."""
    <DeepExtract>
    _test_init(tmp_path)
    (config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
    </DeepExtract>
    section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
    <DeepExtract>
    rows = []
    csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
    with open(csv_path, 'r', newline='') as f:
        csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row in csv_reader:
            rows.append(row)
    rows = rows
    </DeepExtract>
    for i in [3, 6, 8, 11]:
        for j in [13, 14, 15, 16]:
            rows[i][j] = ''
    with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
        mock_csv_reader.return_value = rows
        tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
        retval = tgt.execute()
        assert retval == TaskOutcome.SUCCESS
        fp = pathlib.Path(tmp_path) / 'component-definition.json'
        cd = ComponentDefinition.oscal_read(fp)
        component = cd.components[0]
        assert len(component.props) == 50
        for prop in component.props:
            assert prop.name != 'Parameter_Id'
def test_execute_delete_params(tmp_path: pathlib.Path) -> None:
    """Test execute delete params."""
    _test_init(tmp_path)
    (config, section) = _get_config_section(tmp_path, 'test-csv-to-oscal-cd-bp.config')
    section['component-definition'] = 'tests/data/csv/component-definitions/bp/component-definition.json'
    rows = []
    csv_path = pathlib.Path('tests/data/csv/bp.sample.v2.csv')
    with open(csv_path, 'r', newline='') as f:
        csv_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row in csv_reader:
            rows.append(row)
    rows = rows
    for i in [3, 6, 8, 11]:
        for j in [13, 14, 15, 16]:
            rows[i][j] = ''
    with mock.patch('trestle.tasks.csv_to_oscal_cd.csv.reader') as mock_csv_reader:
        mock_csv_reader.return_value = rows
        tgt = csv_to_oscal_cd.CsvToOscalComponentDefinition(section)
        retval = tgt.execute()
        assert retval == TaskOutcome.SUCCESS
        fp = pathlib.Path(tmp_path) / 'component-definition.json'
        cd = ComponentDefinition.oscal_read(fp)
        component = cd.components[0]
        assert len(component.props) == 50
        for prop in component.props:
            assert prop.name != 'Parameter_Id'
compliance-trestle
positive
@verbose(True, verbose_output=False, timeout=None, _str='Analyzing image with OCR')
def analyze(self, data):
    """ start ocr reading logic for packed files only """
    self.words = []
    data['OCR'] = deepcopy(self.datastruct)
    with ignore_excpetion(Exception):
        if len(data['Packed']['Files']) > 0:
            <DeepExtract>
            for item in data['Packed']['Files']:
                with ignore_excpetion(Exception):
                    image = Image.open(BytesIO(data['FilesDumps'][item['Path']]))
                    image = image.convert('RGBA')
                    text = image_to_string(image, config='--psm 6')
                    words = findall('[ -~]{4,}', text)
                    if len(words) > 0:
                        self.words.append([words, item['Path']])
            </DeepExtract>
    <DeepExtract>
    for words in self.words:
        for word in words[0]:
            if len(word) > 0:
                data['OCR']['OCR'].append({'Word': word, 'File': words[1]})
    </DeepExtract>
@verbose(True, verbose_output=False, timeout=None, _str='Analyzing image with OCR')
def analyze(self, data):
    """ start ocr reading logic for packed files only """
    self.words = []
    data['OCR'] = deepcopy(self.datastruct)
    with ignore_excpetion(Exception):
        if len(data['Packed']['Files']) > 0:
            for item in data['Packed']['Files']:
                with ignore_excpetion(Exception):
                    image = Image.open(BytesIO(data['FilesDumps'][item['Path']]))
                    image = image.convert('RGBA')
                    text = image_to_string(image, config='--psm 6')
                    words = findall('[ -~]{4,}', text)
                    if len(words) > 0:
                        self.words.append([words, item['Path']])
    for words in self.words:
        for word in words[0]:
            if len(word) > 0:
                data['OCR']['OCR'].append({'Word': word, 'File': words[1]})
analyzer
positive
def test_raises_when_init_with_no_args_called_with_args(self):
    @dataclass
    class Data(Aggregate):
        pass

    @dataclass
    class MyAgg(Aggregate):
        pass

    def assert_raises(cls):
        method_name = get_method_name(cls.__init__)
        with self.assertRaises(TypeError) as cm:
            cls(0)
        self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
        with self.assertRaises(TypeError) as cm:
            cls(value=0)
        self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")

    <DeepExtract>
    method_name = get_method_name(Data.__init__)
    with self.assertRaises(TypeError) as cm:
        Data(0)
    self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
    with self.assertRaises(TypeError) as cm:
        Data(value=0)
    self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")
    </DeepExtract>
    <DeepExtract>
    method_name = get_method_name(MyAgg.__init__)
    with self.assertRaises(TypeError) as cm:
        MyAgg(0)
    self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
    with self.assertRaises(TypeError) as cm:
        MyAgg(value=0)
    self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")
    </DeepExtract>
def test_raises_when_init_with_no_args_called_with_args(self):
    @dataclass
    class Data(Aggregate):
        pass

    @dataclass
    class MyAgg(Aggregate):
        pass

    def assert_raises(cls):
        method_name = get_method_name(cls.__init__)
        with self.assertRaises(TypeError) as cm:
            cls(0)
        self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
        with self.assertRaises(TypeError) as cm:
            cls(value=0)
        self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")

    method_name = get_method_name(Data.__init__)
    with self.assertRaises(TypeError) as cm:
        Data(0)
    self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
    with self.assertRaises(TypeError) as cm:
        Data(value=0)
    self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")
    method_name = get_method_name(MyAgg.__init__)
    with self.assertRaises(TypeError) as cm:
        MyAgg(0)
    self.assertEqual(cm.exception.args[0], f'{method_name}() takes 1 positional argument but 2 were given')
    with self.assertRaises(TypeError) as cm:
        MyAgg(value=0)
    self.assertEqual(cm.exception.args[0], f"{method_name}() got an unexpected keyword argument 'value'")
eventsourcing
positive
def run(self):
    <DeepExtract>
    with self.spoke_regional_client('cloudformation') as cloudformation:
        try:
            paginator = cloudformation.get_paginator('describe_stacks')
            for page in paginator.paginate(StackName=self.stack_name):
                for stack in page.get('Stacks', []):
                    status = stack.get('StackStatus')
                    if status != 'DELETE_COMPLETE':
                        status = status
        except ClientError as error:
            if error.response['Error']['Message'] != f'Stack with id {self.stack_name} does not exist':
                raise error
            status = '-'
    </DeepExtract>
    if status == '-':
        <DeepExtract>
        with self.output().open('w') as f:
            f.write(json.dumps({'current_version': '-', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': 'Stack would be created', 'params': self.param_kwargs}, indent=4, default=str))
        </DeepExtract>
    elif status == 'ROLLBACK_COMPLETE':
        if self.should_delete_rollback_complete_stacks:
            <DeepExtract>
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '-', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': 'Stack would be replaced', 'params': self.param_kwargs}, indent=4, default=str))
            </DeepExtract>
        else:
            <DeepExtract>
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '-', 'new_version': '-', 'effect': constants.NO_CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': "Stack needs remediation - it's in ROLLBACK_COMPLETE", 'params': self.param_kwargs}, indent=4, default=str))
            </DeepExtract>
    else:
        task_output = dict(**self.params_for_results_display(), account_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.account_parameters), launch_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.launch_parameters), manifest_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.manifest_parameters))
        all_params = self.get_parameter_values()
        template_to_provision_source = self.input().get('template').open('r').read()
        try:
            template_to_provision = cfn_tools.load_yaml(template_to_provision_source)
        except Exception:
            try:
                template_to_provision = cfn_tools.load_json(template_to_provision_source)
            except Exception:
                raise Exception('Could not parse new template as YAML or JSON')
        params_to_use = dict()
        for (param_name, p) in template_to_provision.get('Parameters', {}).items():
            if all_params.get(param_name, p.get('DefaultValue')) is not None:
                params_to_use[param_name] = all_params.get(param_name, p.get('DefaultValue'))
        existing_stack_params_dict = dict()
        existing_template = ''
        if status in ['CREATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'UPDATE_COMPLETE', 'IMPORT_COMPLETE', 'IMPORT_ROLLBACK_COMPLETE']:
            with self.spoke_regional_client('cloudformation') as cloudformation:
                existing_stack_params_dict = {}
                stack = cloudformation.describe_stacks(StackName=self.stack_name).get('Stacks')[0]
                summary_response = cloudformation.get_template_summary(StackName=self.stack_name)
                for parameter in summary_response.get('Parameters'):
                    existing_stack_params_dict[parameter.get('ParameterKey')] = parameter.get('DefaultValue')
                for stack_param in stack.get('Parameters', []):
                    existing_stack_params_dict[stack_param.get('ParameterKey')] = stack_param.get('ParameterValue')
                template_body = cloudformation.get_template(StackName=self.stack_name, TemplateStage='Original').get('TemplateBody')
                try:
                    existing_template = cfn_tools.load_yaml(template_body)
                except Exception:
                    try:
                        existing_template = cfn_tools.load_json(template_body)
                    except Exception:
                        raise Exception('Could not parse existing template as YAML or JSON')
        template_to_use = cfn_tools.dump_yaml(template_to_provision)
        if status == 'UPDATE_ROLLBACK_COMPLETE':
            <DeepExtract>
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Stack would be updated', 'params': self.param_kwargs}, indent=4, default=str))
            </DeepExtract>
        elif existing_stack_params_dict == params_to_use:
            self.info(f'params unchanged')
            if template_to_use == cfn_tools.dump_yaml(existing_template):
                self.info(f'template the same')
                <DeepExtract>
                with self.output().open('w') as f:
                    f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.NO_CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'No change', 'params': self.param_kwargs}, indent=4, default=str))
                </DeepExtract>
            else:
                self.info(f'template changed')
                <DeepExtract>
                with self.output().open('w') as f:
                    f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Template has changed', 'params': self.param_kwargs}, indent=4, default=str))
                </DeepExtract>
        else:
            self.info(f'params changed')
            <DeepExtract>
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Parameters have changed', 'params': self.param_kwargs}, indent=4, default=str))
            </DeepExtract>
def run(self):
    with self.spoke_regional_client('cloudformation') as cloudformation:
        try:
            paginator = cloudformation.get_paginator('describe_stacks')
            for page in paginator.paginate(StackName=self.stack_name):
                for stack in page.get('Stacks', []):
                    status = stack.get('StackStatus')
                    if status != 'DELETE_COMPLETE':
                        status = status
        except ClientError as error:
            if error.response['Error']['Message'] != f'Stack with id {self.stack_name} does not exist':
                raise error
            status = '-'
    if status == '-':
        with self.output().open('w') as f:
            f.write(json.dumps({'current_version': '-', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': 'Stack would be created', 'params': self.param_kwargs}, indent=4, default=str))
    elif status == 'ROLLBACK_COMPLETE':
        if self.should_delete_rollback_complete_stacks:
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '-', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': 'Stack would be replaced', 'params': self.param_kwargs}, indent=4, default=str))
        else:
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '-', 'new_version': '-', 'effect': constants.NO_CHANGE, 'current_status': '-', 'active': 'N/A', 'notes': "Stack needs remediation - it's in ROLLBACK_COMPLETE", 'params': self.param_kwargs}, indent=4, default=str))
    else:
        task_output = dict(**self.params_for_results_display(), account_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.account_parameters), launch_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.launch_parameters), manifest_parameters=servicecatalog_puppet.serialisation_utils.unwrap(self.manifest_parameters))
        all_params = self.get_parameter_values()
        template_to_provision_source = self.input().get('template').open('r').read()
        try:
            template_to_provision = cfn_tools.load_yaml(template_to_provision_source)
        except Exception:
            try:
                template_to_provision = cfn_tools.load_json(template_to_provision_source)
            except Exception:
                raise Exception('Could not parse new template as YAML or JSON')
        params_to_use = dict()
        for (param_name, p) in template_to_provision.get('Parameters', {}).items():
            if all_params.get(param_name, p.get('DefaultValue')) is not None:
                params_to_use[param_name] = all_params.get(param_name, p.get('DefaultValue'))
        existing_stack_params_dict = dict()
        existing_template = ''
        if status in ['CREATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE', 'UPDATE_COMPLETE', 'IMPORT_COMPLETE', 'IMPORT_ROLLBACK_COMPLETE']:
            with self.spoke_regional_client('cloudformation') as cloudformation:
                existing_stack_params_dict = {}
                stack = cloudformation.describe_stacks(StackName=self.stack_name).get('Stacks')[0]
                summary_response = cloudformation.get_template_summary(StackName=self.stack_name)
                for parameter in summary_response.get('Parameters'):
                    existing_stack_params_dict[parameter.get('ParameterKey')] = parameter.get('DefaultValue')
                for stack_param in stack.get('Parameters', []):
                    existing_stack_params_dict[stack_param.get('ParameterKey')] = stack_param.get('ParameterValue')
                template_body = cloudformation.get_template(StackName=self.stack_name, TemplateStage='Original').get('TemplateBody')
                try:
                    existing_template = cfn_tools.load_yaml(template_body)
                except Exception:
                    try:
                        existing_template = cfn_tools.load_json(template_body)
                    except Exception:
                        raise Exception('Could not parse existing template as YAML or JSON')
        template_to_use = cfn_tools.dump_yaml(template_to_provision)
        if status == 'UPDATE_ROLLBACK_COMPLETE':
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Stack would be updated', 'params': self.param_kwargs}, indent=4, default=str))
        elif existing_stack_params_dict == params_to_use:
            self.info(f'params unchanged')
            if template_to_use == cfn_tools.dump_yaml(existing_template):
                self.info(f'template the same')
                with self.output().open('w') as f:
                    f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.NO_CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'No change', 'params': self.param_kwargs}, indent=4, default=str))
            else:
                self.info(f'template changed')
                with self.output().open('w') as f:
                    f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Template has changed', 'params': self.param_kwargs}, indent=4, default=str))
        else:
            self.info(f'params changed')
            with self.output().open('w') as f:
                f.write(json.dumps({'current_version': '?', 'new_version': self.version_id, 'effect': constants.CHANGE, 'current_status': status, 'active': 'N/A', 'notes': 'Parameters have changed', 'params': self.param_kwargs}, indent=4, default=str))
aws-service-catalog-puppet
positive
def _scale_enum(anchor, scales):
    """Enumerate a set of anchors for each scale wrt an anchor."""
    <DeepExtract>
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    (w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
    </DeepExtract>
    ws = w * scales
    hs = h * scales
    <DeepExtract>
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
    anchors = anchors
    </DeepExtract>
    return anchors
def _scale_enum(anchor, scales):
    """Enumerate a set of anchors for each scale wrt an anchor."""
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    (w, h, x_ctr, y_ctr) = (w, h, x_ctr, y_ctr)
    ws = w * scales
    hs = h * scales
    ws = ws[:, np.newaxis]
    hs = hs[:, np.newaxis]
    anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))
    anchors = anchors
    return anchors
doubleheadsrcnn
positive
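Note on the `_scale_enum` pair above: it is the scale-enumeration step of the familiar Faster R-CNN anchor generator, which turns one base box in corner format into one box per scale around the same centre. The snippet below is a minimal, self-contained sketch of that arithmetic, not part of the dataset row; the helper name and the 16x16 base anchor with scales 8/16/32 are illustrative assumptions, and the corner maths is re-derived inline instead of calling the repo's _whctrs/_mkanchors helpers.

import numpy as np

# Sketch (assumed example): enumerate scaled anchors around the centre of a
# 16x16 base box, mirroring the arithmetic in _scale_enum above.
def scale_enum_sketch(anchor, scales):
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    ws = (w * scales)[:, np.newaxis]
    hs = (h * scales)[:, np.newaxis]
    # corners are recovered from centre and size: x1 = ctr - (w - 1) / 2, etc.
    return np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                      x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))

print(scale_enum_sketch(np.array([0, 0, 15, 15]), np.array([8, 16, 32])))
# three boxes of side 128, 256 and 512 pixels, all centred on (7.5, 7.5)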
def write(*args, **kwargs):
    """Iterates arguments, writes tuple and list arguments one line per
    element, else writes argument as single line. If no arguments writes
    blank line. If argument is None nothing is written. self.newline is
    appended to each line."""
    if 'trace' in kwargs and len(args) > 0:
        trace(kwargs['trace'], args[0])
    if len(args) == 0:
        <DeepExtract>
        if not (self.skip_blank_lines and (not line or not line.strip())):
            self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', line) or '') + self.newline)
            self.lines_out = self.lines_out + 1
        </DeepExtract>
        self.lines_out = self.lines_out + 1
    else:
        for arg in args:
            if utils.is_array(arg):
                for s in arg:
                    <DeepExtract>
                    if not (self.skip_blank_lines and (not s or not s.strip())):
                        self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', s) or '') + self.newline)
                        self.lines_out = self.lines_out + 1
                    </DeepExtract>
            elif arg is not None:
                <DeepExtract>
                if not (self.skip_blank_lines and (not arg or not arg.strip())):
                    self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', arg) or '') + self.newline)
                    self.lines_out = self.lines_out + 1
                </DeepExtract>
def write(*args, **kwargs):
    """Iterates arguments, writes tuple and list arguments one line per
    element, else writes argument as single line. If no arguments writes
    blank line. If argument is None nothing is written. self.newline is
    appended to each line."""
    if 'trace' in kwargs and len(args) > 0:
        trace(kwargs['trace'], args[0])
    if len(args) == 0:
        if not (self.skip_blank_lines and (not line or not line.strip())):
            self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', line) or '') + self.newline)
            self.lines_out = self.lines_out + 1
        self.lines_out = self.lines_out + 1
    else:
        for arg in args:
            if utils.is_array(arg):
                for s in arg:
                    if not (self.skip_blank_lines and (not s or not s.strip())):
                        self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', s) or '') + self.newline)
                        self.lines_out = self.lines_out + 1
            elif arg is not None:
                if not (self.skip_blank_lines and (not arg or not arg.strip())):
                    self.f.write((re.sub('\\\\({[a-zA-Z0-9_][a-zA-Z0-9_\\-]*)', '\\1', arg) or '') + self.newline)
                    self.lines_out = self.lines_out + 1
asciidoc-py
positive
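Side note on the asciidoc `write()` pair above: the `re.sub` call in the inlined body appears to strip a single leading backslash from escaped attribute references such as `\{name}` before the line is written out. Below is a small stand-alone demo of that substitution, not part of the dataset row; the sample input string is made up for illustration.

import re

# The pattern drops one backslash that precedes an attribute reference like
# {revnumber}; everything else in the line is left untouched.
line = 'version is \\{revnumber}'
print(re.sub(r'\\({[a-zA-Z0-9_][a-zA-Z0-9_\-]*)', r'\1', line))
# -> version is {revnumber}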