| column | type | range / values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4–209 |
| max_stars_repo_name | string | length 5–121 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24–24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24–24 |
| max_issues_repo_path | string | length 4–209 |
| max_issues_repo_name | string | length 5–121 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 (nullable) | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24–24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24–24 |
| max_forks_repo_path | string | length 4–209 |
| max_forks_repo_name | string | length 5–121 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24–24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24–24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
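
As a quick orientation to the schema above, the sketch below shows how rows with these columns might be loaded and filtered with pandas. The file name and the threshold values are hypothetical and not taken from this dump, which does not say where the rows are stored or how they should be filtered.

```python
import pandas as pd

# Hypothetical storage location; the dump above does not specify how the rows are persisted.
df = pd.read_parquet("code_rows.parquet")

# The columns from the schema table above.
print(df.dtypes)

# Illustrative quality filter on the derived columns (thresholds are made up):
# keep Python files that look like hand-written code rather than minified or
# generated blobs, based on the per-file line-length and alphanumeric statistics.
mask = (
    (df["lang"] == "Python")
    & (df["max_line_length"] <= 1000)
    & (df["avg_line_length"] <= 100)
    & (df["alphanum_fraction"].between(0.25, 1.0))
)
sample = df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size", "content"]]
print(sample.head())
```
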
| hexsha: 1eed2b0669eafd581e3cbf619d30ba2ae1602a19 | size: 1,717 | ext: py | lang: Python |
| path: notebooks/batch_evaluate_mvar.py | repo: facebookresearch/robust_mobo @ 39195f5796e3dd19c411d4cd4c8252488ecbd0ec | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: 5 (2022-02-15T17:08:14.000Z – 2022-03-25T04:12:30.000Z) | issues: null | forks: null |

#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import sys
import torch
from approximate_max_hv_run import construct_mvar_hv, max_hv_dir, exp_dir
from botorch.utils.multi_objective import is_non_dominated
def batch_evaluate_mvar(problem, batch_i):
x_path = os.path.join(max_hv_dir, f"{problem}_Xs.pt")
# Using nd since we remove the dominated ones
out_path = os.path.join(max_hv_dir, f"{problem}_mvar_{batch_i}_nd.pt")
if os.path.exists(out_path):
raise FileExistsError
all_Xs = torch.load(x_path)["all_Xs"]
active_X = torch.split(all_Xs, 1000)[batch_i]
config_path = os.path.join(exp_dir, problem, "config.json")
with open(config_path, "r") as f:
config = json.load(f)
config.pop("device")
mvar_hv = construct_mvar_hv(**config)
# Calculate the MVaR
perturbed_X = mvar_hv.perturbation(active_X)
perturbed_Y = mvar_hv.eval_problem(perturbed_X)
if mvar_hv.num_constraints > 0:
infeas = (perturbed_Y[..., -mvar_hv.num_constraints :] < 0).any(dim=-1)
perturbed_Y = perturbed_Y[..., : -mvar_hv.num_constraints]
perturbed_Y[infeas] = mvar_hv.hv.ref_point
new_mvar = (
mvar_hv.mvar(
perturbed_Y.cpu(),
use_cpu=True,
)
.view(-1, perturbed_Y.shape[-1])
.to(active_X)
)
# Remove dominated ones
mask = is_non_dominated(new_mvar)
new_mvar = new_mvar[mask]
torch.save(new_mvar, out_path)
if __name__ == "__main__":
batch_evaluate_mvar(sys.argv[1], int(sys.argv[2]))
| avg_line_length: 31.796296 | max_line_length: 79 | alphanum_fraction: 0.684916 |

| hexsha: e340350b8b9305464632dfe464ae1fb853c58599 | size: 202 | ext: py | lang: Python |
| path: django_profile/fields.py | repo: millerthegorilla/django_profile @ 9860022a8c442901c955c4e3c7500f338b958d16 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

from crispy_forms import layout
class FloatingField(layout.Field):
template = 'fields/profile_floating_field.html'
class FileInput(layout.Field):
template = 'fields/profile_file_input.html'
| avg_line_length: 20.2 | max_line_length: 51 | alphanum_fraction: 0.782178 |

| hexsha: 288232c9b5b36672f10da7958d1449f28da85944 | size: 96 | ext: py | lang: Python |
| path: newfile.py | repo: kumarjsandip/gmb @ 62cae0d43b5da6f2c571a47842b2c4c1e5ea342d | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

def main():
print("This is check")
return "This is check"
if __name__ == '__main__':
main()
| avg_line_length: 13.714286 | max_line_length: 26 | alphanum_fraction: 0.645833 |

| hexsha: fc019522e2adb38e42772742e717995e230caa6d | size: 3,755 | ext: py | lang: Python |
| path: stored_mail/models.py | repo: rectory-school/rectory-apps-updated @ a6d47f6d5928f0c816eb45fd229da2f9f2fa2ff1 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: 30 (2021-07-16T12:54:14.000Z – 2021-12-24T16:59:04.000Z) | forks: null |

"""Models for stored mail sender"""
import email.utils
import email.message
from email.headerregistry import Address
from typing import List
from uuid import uuid4
from django.db import models
from django.conf import settings
from django.core.mail import EmailMessage, EmailMultiAlternatives
FIELD_OPTIONS = (
('to', 'To'),
('cc', 'Cc'),
('bcc', 'Bcc'),
('reply-to', 'Reply To'),
)
_field_option_length = max((len(o[0]) for o in FIELD_OPTIONS))
class OutgoingMessage(models.Model):
"""Outgoing email stored for sending"""
unique_id = models.UUIDField(default=uuid4, unique=True)
from_name = models.CharField(max_length=255)
from_address = models.EmailField()
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
subject = models.CharField(max_length=4096, blank=True)
text = models.TextField()
html = models.TextField(blank=True)
sent_at = models.DateTimeField(null=True, db_index=True)
last_send_attempt = models.DateTimeField(null=True, db_index=True)
@property
def message_id(self) -> str:
"""Unique message ID"""
server_email = settings.SERVER_EMAIL
_, domain = server_email.split('@')
return f"{self.unique_id}@{domain}"
def get_django_email(self, connection=None) -> EmailMessage:
"""Get a Django email message"""
message_id = self.message_id
# Django kwarg to related_address field key
related_address_map = {
'to': 'to',
'cc': 'cc',
'bcc': 'bcc',
'reply_to': 'reply-to'
}
# Constructors are the same for both emails and email alternatives,
# so do a little meta-programming and extract the commonalities
kwargs = {
'from_email': email.utils.formataddr((self.from_name, self.from_address)),
'subject': self.subject,
'body': self.text,
'headers': {
'Message-ID': message_id,
'Date': email.utils.formatdate(self.created_at.timestamp())
},
'connection': connection,
}
# Load to/cc/bcc/reply-to
all_addresses: List[RelatedAddress] = list(self.addresses.all())
for mail_obj_key, addr_field_key in related_address_map.items():
addresses = [addr for addr in all_addresses if addr.field == addr_field_key]
kwargs[mail_obj_key] = [addr.encoded for addr in addresses]
# Prune falsey values
kwargs = {k: v for k, v in kwargs.items() if v}
if self.html:
msg = EmailMultiAlternatives(**kwargs)
msg.attach_alternative(self.html, "text/html")
else:
msg = EmailMessage(**kwargs)
return msg
def __str__(self):
return f"Message {self.pk}"
class RelatedAddress(models.Model):
"""Address to send an email to"""
name = models.CharField(max_length=255)
address = models.EmailField()
message = models.ForeignKey(OutgoingMessage, on_delete=models.CASCADE, related_name='addresses')
field = models.CharField(choices=FIELD_OPTIONS, max_length=_field_option_length)
def __str__(self):
if not self.name:
return self.address
return email.utils.formataddr((self.name, self.address))
class Meta:
unique_together = (
('address', 'message'),
)
@property
def addr_obj(self) -> Address:
"""The address object to send from"""
username, domain = self.address.split('@', 1)
return Address(self.name, username, domain)
@property
def encoded(self) -> str:
"""An encoded address"""
return email.utils.formataddr((self.name, self.address))
| avg_line_length: 29.108527 | max_line_length: 100 | alphanum_fraction: 0.630892 |

| hexsha: 2866a1d656e089a1386439a4b1be58ac9a4047b3 | size: 3,144 | ext: py | lang: Python |
| path: tools/data/kinetics400/add_keys_for_missing_dets.py | repo: vt-vl-lab/video-data-aug @ 01667cdbd1b952f2510af3422beeeb76e0d9e15a | licenses: ["Apache-2.0"] (identical for stars/issues/forks) |
| stars: 20 (2021-03-31T02:25:20.000Z – 2022-03-11T11:45:59.000Z) | issues: 6 (2021-05-27T18:08:39.000Z – 2022-03-23T14:00:51.000Z) | forks: 4 (2021-03-31T03:11:45.000Z – 2021-08-22T11:11:45.000Z) |

import numpy as np
import os
import pdb
from PIL import Image, ImageDraw
import cv2
import glob as gb
# import av
from skvideo.io import ffprobe
import pandas as pd
import sys
"""
Add keys for videos without any detections, add also frame keys for those videos
"""
split = 'train'
src_det_file_path = '/home/jinchoi/src/rehab/dataset/action/kinetics/detectron_results/kinetics100/kinetics100_{}_detections_height_256pixels.npy'.format(split)
if split == 'val':
ref_listfile_path = '/home/jinchoi/src/video-data-aug/data/kinetics400/videossl_splits/kinetics100_{}_100_percent_labeled_rawframes.txt'.format(split)
else:
ref_listfile_path = '/home/jinchoi/src/video-data-aug/data/kinetics400/videossl_splits/kinetics100_{}_100_percent_labeled_rawframes.txt'.format(split)
tgt_det_file_path = '/home/jinchoi/src/rehab/dataset/action/kinetics/detectron_results/kinetics100/kinetics100_{}_no_missing_keys_detections_height_256pixels.npy'.format(split)
# read the original detection file
dets = np.load(src_det_file_path, allow_pickle=True)
dets = dets.item()
video_res_info = dets['video_res_info']
dets = dets['dets']
print('Done with reading the org detection numpy file: {}'.format(src_det_file_path))
# read the reference video listfile
df = pd.read_csv(ref_listfile_path, header=None, sep=' ')
ref_data = df.values
new_dets = dict()
new_dets = {}
ref_cls_list = []
missing_det_vid_cnt = 0
# contruct a dictionary with class as key and the class/vid, # of frames, label as values
for i,row in enumerate(ref_data):
if i%1000 == 0:
print('Processing {}/{} videos in the ref. listfile'.format(i+1, ref_data.shape[0]))
# cur_key = row[0].split('/')[0] + '/' + row[0].split('/')[1][:11]
cur_cls = row[0].split('/')[0]
cur_vid = row[0].split('/')[1][:11]
num_frms = row[1]
if cur_cls not in new_dets:
new_dets[cur_cls] = dict()
if cur_vid not in new_dets[cur_cls]:
new_dets[cur_cls][cur_vid] = dict()
for idx in range(num_frms):
idx_one_based = idx + 1
new_dets[cur_cls][cur_vid][idx_one_based] = {'frame'
: idx_one_based, 'human_boxes': np.zeros([0,5]).astype(np.float32)}
if cur_vid in dets[cur_cls]:
assert len(new_dets[cur_cls][cur_vid]) == len(dets[cur_cls][cur_vid])
new_dets[cur_cls][cur_vid] = dets[cur_cls][cur_vid]
dets[cur_cls].pop(cur_vid,None)
else:
missing_det_vid_cnt += 1
print(i, cur_vid)
if cur_cls not in ref_cls_list:
ref_cls_list.append(cur_cls)
sys.stdout.flush()
print('Done with adding missing vid keys and frame keys by comparing {} and {}'.format(ref_listfile_path, src_det_file_path))
# validate if all the exisiting dets are copied to the new_dets
for cur_cls,cur_data in dets.items():
if len(cur_data.keys()) > 0:
pdb.set_trace()
wrapped_dets = dict(
video_res_info=video_res_info,
dets = new_dets
)
np.save(tgt_det_file_path, wrapped_dets)
print('Detection results saved to {}'.format(tgt_det_file_path))
| avg_line_length: 36.988235 | max_line_length: 176 | alphanum_fraction: 0.689567 |

| hexsha: da8bc1d648651e0c54e92513c0976deec680728f | size: 1,191 | ext: py | lang: Python |
| path: core/script/oidrules/hires.py | licenses: ["BSD-3-Clause"] |
| stars repo: xUndero/noc @ 9fb34627721149fcf7064860bd63887e38849131 | stars: 1 (2019-09-20T09:36:48.000Z – 2019-09-20T09:36:48.000Z) |
| issues/forks repo: ewwwcha/noc @ aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | issues: null | forks: null |

# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# HiresRule
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from .loader import load_rule
class HiresRule(object):
"""
Select *hires* chain if SNMP | IF-MIB HC capability set,
Select *normal* capability otherwise
"""
name = "hires"
def __init__(self, hires, normal):
self.hires = hires
self.normal = normal
def iter_oids(self, script, metric):
if script.has_capability("SNMP | IF-MIB | HC"):
g = self.hires.iter_oids
else:
g = self.normal.iter_oids
for r in g(script, metric):
yield r
@classmethod
def from_json(cls, data):
for v in ("hires", "normal"):
if v not in data:
raise ValueError("%s is required" % v)
return HiresRule(hires=load_rule(data["hires"]), normal=load_rule(data["normal"]))
| avg_line_length: 28.357143 | max_line_length: 90 | alphanum_fraction: 0.494542 |

| hexsha: 31ed781e820e3383b8130a3c65172c8681dea7f6 | size: 1,721 | ext: py | lang: Python |
| path: data_source/stations_meteo/data.py | repo: Frovu/cr-data-tool @ 4a87115eb10c548bcf8f4f3364e28196cfd3ebf4 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

import data_source.temperature_model.temperature as temperature
import data_source.stations_meteo.db_proxy as proxy
import data_source.stations_meteo.parser as parser
from core.sequence_filler import SequenceFiller, fill_fn
from datetime import datetime, timezone
from math import floor, ceil
import logging as log
scheduler = SequenceFiller(ttl=15)
completed_query_chache = dict()
PERIOD = 3600
def get_with_model(lat, lon, t_from, t_to, period=PERIOD):
lat = round(float(lat), 2)
lon = round(float(lon), 2)
token = (lat, lon)
t_from, t_to = period * floor(t_from / period), period * ceil(t_to / period)
key = (token, t_from, t_to)
t_trim = datetime.utcnow().replace(tzinfo=timezone.utc).timestamp() // period * period - period
if t_trim > t_to: t_trim = t_to
is_done, info = scheduler.status(key)
if is_done == False:
return 'failed' if info.get('failed') else 'busy', info
station = proxy.select_station(lat, lon)
if not parser.supported(station):
return temperature.get(lat, lon, t_from, t_to)
model_status, model_r = temperature.get(lat, lon, t_from, t_to, True)
if model_status == 'unknown':
return model_status, model_r
if model_status == 'ok' and not proxy.analyze_integrity(station, t_from, t_trim):
return 'ok', proxy.select(station, t_from, t_to, True)
log.info(f'LOCAL METEO: Satisfying {station} {t_from}:{t_trim}')
q = scheduler.do_fill(token, t_from, t_trim, period,
parser.get_tasks(station, period, fill_fn), key_overwrite=key)
if model_status == 'accepted':
model_r.append_tasks(q.tasks) # use temp_model query object
scheduler.query(key, model_r)
return 'accepted', None
| avg_line_length: 44.128205 | max_line_length: 99 | alphanum_fraction: 0.710052 |

| hexsha: c57a471a1162a0614d4f9876e9ebef248b8f3891 | size: 1,998 | ext: py | lang: Python |
| path: examples/pix2pix/facade_dataset_chainerio.py | repo: shu65/chainerio @ 929d091160e524e1ca746bd254ba5cff3bd116b7 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

from PIL import Image
from chainer.dataset import dataset_mixin
import numpy as np
# CHAINERIO import
import chainerio
# CHAINERIO import end
# download `BASE` dataset from http://cmp.felk.cvut.cz/~tylecr1/facade/
# chainerio.set_root("hdfs")
class FacadeDataset(dataset_mixin.DatasetMixin):
def __init__(self, dataDir='./facade/base', data_range=(1, 300)):
print("load dataset start")
print(" from: %s" % dataDir)
print(" range: [%d, %d)" % (data_range[0], data_range[1]))
self.dataDir = dataDir
self.dataset = []
for i in range(data_range[0], data_range[1]):
# CHAINERIO add
img_data = chainerio.open(dataDir+"/cmp_b%04d.jpg" % i, mode='rb')
img = Image.open(img_data)
label_data = chainerio.open(dataDir+"/cmp_b%04d.png" % i, mode='rb')
label = Image.open(label_data)
# CHAINERIO add end
w, h = img.size
r = 286/float(min(w, h))
# resize images so that min(w, h) == 286
img = img.resize((int(r*w), int(r*h)), Image.BILINEAR)
label = label.resize((int(r*w), int(r*h)), Image.NEAREST)
img = np.asarray(img).astype("f").transpose(2, 0, 1)/128.0-1.0
label_ = np.asarray(label)-1 # [0, 12)
label = np.zeros((12, img.shape[1], img.shape[2])).astype("i")
for j in range(12):
label[j, :] = label_ == j
self.dataset.append((img, label))
print("load dataset done")
def __len__(self):
return len(self.dataset)
# return (label, img)
def get_example(self, i, crop_width=256):
_, h, w = self.dataset[i][0].shape
x_l = np.random.randint(0, w-crop_width)
x_r = x_l+crop_width
y_l = np.random.randint(0, h-crop_width)
y_r = y_l+crop_width
label = self.dataset[i][1][:, y_l:y_r, x_l:x_r]
img = self.dataset[i][0][:, y_l:y_r, x_l:x_r]
return label, img
| avg_line_length: 35.052632 | max_line_length: 80 | alphanum_fraction: 0.571071 |

| hexsha: 04d2fb1f6503f16ef71a65724aeec6bb98ffe535 | size: 7,455 | ext: py | lang: Python |
| path: recognition/arcface_torch/backbones/iresnet.py | repo: yx9527/insightface @ 4eae1d4e0d4232789df1968d099cd6219752a4a3 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1, act='prelu'):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes) if act == 'prelu' else \
nn.LeakyReLU(0.125, inplace=True) if act == 'leaky' else \
act if isinstance(act, nn.Module) else nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False, act='prelu'):
super(IResNet, self).__init__()
self.fp16 = fp16
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes) if act == 'prelu' else nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2, act=act)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0], act=act)
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1], act=act)
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2], act=act)
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout = nn.Dropout(p=dropout, inplace=True)
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False, act='prelu'):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, act=act))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation, act=act))
return nn.Sequential(*layers)
def forward(self, x):
with torch.cuda.amp.autocast(self.fp16):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x.float() if self.fp16 else x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def iresnet200(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
progress, **kwargs)
| avg_line_length: 39.444444 | max_line_length: 107 | alphanum_fraction: 0.548759 |

| hexsha: a3caa54bc154d2b17dca22e1731d4f539c1be6ef | size: 891 | ext: py | lang: Python |
| path: craigslist/sites.py | licenses: ["MIT-0"] |
| stars repo: echentw/python-craigslist @ a20f02afee6ce4d1f20faa320b7f6085c589bfe7 | stars: 1 (2020-08-08T22:11:11.000Z – 2020-08-08T22:11:11.000Z) |
| issues/forks repo: pythonthings/python-craigslist @ a20f02afee6ce4d1f20faa320b7f6085c589bfe7 | issues: null | forks: null |

from bs4 import BeautifulSoup
import requests
ALL_SITES_URL = 'http://www.craigslist.org/about/sites'
SITE_URL = 'http://%s.craigslist.org'
def get_all_sites():
response = requests.get(ALL_SITES_URL)
response.raise_for_status() # Something failed?
soup = BeautifulSoup(response.content, 'html.parser')
sites = set()
for box in soup.findAll('div', {'class': 'box'}):
for a in box.findAll('a'):
# Remove protocol and get subdomain
site = a.attrs['href'].rsplit('//', 1)[1].split('.')[0]
sites.add(site)
return sites
def get_all_areas(site):
response = requests.get(SITE_URL % site)
response.raise_for_status() # Something failed?
soup = BeautifulSoup(response.content, 'html.parser')
raw = soup.select('ul.sublinks li a')
sites = set(a.attrs['href'].rsplit('/')[1] for a in raw)
return sites
| avg_line_length: 29.7 | max_line_length: 67 | alphanum_fraction: 0.646465 |

| hexsha: ad3f7e6cfe842d43aa9c34023fbecb48fb6c2102 | size: 200 | ext: py | lang: Python |
| path: slnee_quality/slnee_quality/doctype/deps_table/deps_table.py | repo: erpcloudsystems/slnee_quality @ ad81f029a795ee073768c7c933cd91e61b6df059 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

# Copyright (c) 2021, erpcloud.systems and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class DepsTable(Document):
pass
| avg_line_length: 22.222222 | max_line_length: 55 | alphanum_fraction: 0.795 |

| hexsha: 56afdf96454a994591a3d97e71b8d9619e0e68c8 | size: 7,717 | ext: py | lang: Python |
| path: PaddleCV/PaddleDetection/ppdet/modeling/backbones/mobilenet.py | repo: XiaoguangHu01/models @ a95d49323ed504e5a9164586f171f408954fd43a | licenses: ["Apache-2.0"] (identical for stars/issues/forks) |
| stars: 4 (2020-01-04T13:15:02.000Z – 2021-07-21T07:50:02.000Z) | issues: null | forks: 3 (2019-10-31T07:18:49.000Z – 2020-01-13T03:18:39.000Z) |

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from ppdet.core.workspace import register
__all__ = ['MobileNet']
@register
class MobileNet(object):
"""
MobileNet v1, see https://arxiv.org/abs/1704.04861
Args:
norm_type (str): normalization type, 'bn' and 'sync_bn' are supported
norm_decay (float): weight decay for normalization layer weights
conv_group_scale (int): scaling factor for convolution groups
with_extra_blocks (bool): if extra blocks should be added
extra_block_filters (list): number of filter for each extra block
"""
__shared__ = ['norm_type', 'weight_prefix_name']
def __init__(self,
norm_type='bn',
norm_decay=0.,
conv_group_scale=1,
conv_learning_rate=1.0,
with_extra_blocks=False,
extra_block_filters=[[256, 512], [128, 256], [128, 256],
[64, 128]],
weight_prefix_name=''):
self.norm_type = norm_type
self.norm_decay = norm_decay
self.conv_group_scale = conv_group_scale
self.conv_learning_rate = conv_learning_rate
self.with_extra_blocks = with_extra_blocks
self.extra_block_filters = extra_block_filters
self.prefix_name = weight_prefix_name
def _conv_norm(self,
input,
filter_size,
num_filters,
stride,
padding,
num_groups=1,
act='relu',
use_cudnn=True,
name=None):
parameter_attr = ParamAttr(
learning_rate=self.conv_learning_rate,
initializer=fluid.initializer.MSRA(),
name=name + "_weights")
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=parameter_attr,
bias_attr=False)
bn_name = name + "_bn"
norm_decay = self.norm_decay
bn_param_attr = ParamAttr(
regularizer=L2Decay(norm_decay), name=bn_name + '_scale')
bn_bias_attr = ParamAttr(
regularizer=L2Decay(norm_decay), name=bn_name + '_offset')
return fluid.layers.batch_norm(
input=conv,
act=act,
param_attr=bn_param_attr,
bias_attr=bn_bias_attr,
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def depthwise_separable(self,
input,
num_filters1,
num_filters2,
num_groups,
stride,
scale,
name=None):
depthwise_conv = self._conv_norm(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False,
name=name + "_dw")
pointwise_conv = self._conv_norm(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0,
name=name + "_sep")
return pointwise_conv
def _extra_block(self,
input,
num_filters1,
num_filters2,
num_groups,
stride,
name=None):
pointwise_conv = self._conv_norm(
input=input,
filter_size=1,
num_filters=int(num_filters1),
stride=1,
num_groups=int(num_groups),
padding=0,
name=name + "_extra1")
normal_conv = self._conv_norm(
input=pointwise_conv,
filter_size=3,
num_filters=int(num_filters2),
stride=2,
num_groups=int(num_groups),
padding=1,
name=name + "_extra2")
return normal_conv
def __call__(self, input):
scale = self.conv_group_scale
blocks = []
# input 1/1
out = self._conv_norm(
input, 3, int(32 * scale), 2, 1, name=self.prefix_name + "conv1")
# 1/2
out = self.depthwise_separable(
out, 32, 64, 32, 1, scale, name=self.prefix_name + "conv2_1")
out = self.depthwise_separable(
out, 64, 128, 64, 2, scale, name=self.prefix_name + "conv2_2")
# 1/4
out = self.depthwise_separable(
out, 128, 128, 128, 1, scale, name=self.prefix_name + "conv3_1")
out = self.depthwise_separable(
out, 128, 256, 128, 2, scale, name=self.prefix_name + "conv3_2")
# 1/8
blocks.append(out)
out = self.depthwise_separable(
out, 256, 256, 256, 1, scale, name=self.prefix_name + "conv4_1")
out = self.depthwise_separable(
out, 256, 512, 256, 2, scale, name=self.prefix_name + "conv4_2")
# 1/16
blocks.append(out)
for i in range(5):
out = self.depthwise_separable(
out,
512,
512,
512,
1,
scale,
name=self.prefix_name + "conv5_" + str(i + 1))
module11 = out
out = self.depthwise_separable(
out, 512, 1024, 512, 2, scale, name=self.prefix_name + "conv5_6")
# 1/32
out = self.depthwise_separable(
out, 1024, 1024, 1024, 1, scale, name=self.prefix_name + "conv6")
module13 = out
blocks.append(out)
if not self.with_extra_blocks:
return blocks
num_filters = self.extra_block_filters
module14 = self._extra_block(module13, num_filters[0][0],
num_filters[0][1], 1, 2,
self.prefix_name + "conv7_1")
module15 = self._extra_block(module14, num_filters[1][0],
num_filters[1][1], 1, 2,
self.prefix_name + "conv7_2")
module16 = self._extra_block(module15, num_filters[2][0],
num_filters[2][1], 1, 2,
self.prefix_name + "conv7_3")
module17 = self._extra_block(module16, num_filters[3][0],
num_filters[3][1], 1, 2,
self.prefix_name + "conv7_4")
return module11, module13, module14, module15, module16, module17
| avg_line_length: 36.57346 | max_line_length: 77 | alphanum_fraction: 0.543994 |

| hexsha: 7c72ff7a37714d5faef2242d63931e5810f85241 | size: 10,109 | ext: py | lang: Python |
| path: MLBackend/trainlbp_knn.py | repo: plsakr/CVAgePredictionTool @ 5324bcf22bbd146cf8abcc21bd3c0fe55813cbef | licenses: ["Unlicense"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
import os
from skimage import feature
from tqdm import tqdm
import glob
import pickle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import LeavePOut #for P-cross validation
from sklearn.metrics import classification_report, accuracy_score
# Load the cascade
face_cascade = cv2.CascadeClassifier('./lbptest/haarcascade_frontalface_alt2.xml')
profile_face_cascade= cv2.CascadeClassifier('./lbptest/haarcascade_profileface.xml')
eyes_cascade = cv2.CascadeClassifier('./lbptest/frontalEyes.xml')
def imageResizing(img, scalingFactor):
#resizing the img
scaling_factor = scalingFactor
assert not isinstance(img,type(None)), 'image not found'
width = int(img.shape[1] * scaling_factor/100)
height = int(img.shape[0] * scaling_factor/100)
newDimensions = (width,height)
resizedImg = cv2.resize(img, newDimensions, interpolation = cv2.INTER_AREA)
return resizedImg
# Read the input image
def locateFace(img):
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(np.copy(img), (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2.imshow('img', img)
#cv2.waitKey()
if faces == ():
x=-1
y=-1
w=-1
h=-1
return faces, x,y,w,h
def locateProfile(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = profile_face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(np.copy(img), (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2.imshow('img', img)
#cv2.waitKey()
if faces == ():
x=-1
y=-1
w=-1
h=-1
return faces, x, y, w, h
def locateEyes(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
eyes = profile_face_cascade.detectMultiScale(gray, 1.1, 4)
# Draw rectangle around the faces
for (x, y, w, h) in eyes:
cv2.rectangle(np.copy(img), (x, y), (x+w, y+h), (255, 0, 0), 2)
# Display the output
#cv2.imshow('img', img)
#cv2.waitKey()
if eyes == ():
x=-1
y=-1
w=-1
h=-1
return eyes, x, y, w,h
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist = hist / (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
def train(X, y, k_cross_validation_ratio, testing_size, optimal_k=True, min_range_k=0, max_range_k=0 ):
X0_train, X_test, y0_train, y_test = train_test_split(X,y,test_size=testing_size, random_state=7)
#Scaler is needed to scale all the inputs to a similar range
scaler = StandardScaler()
scaler = scaler.fit(X0_train)
X0_train = scaler.transform(X0_train)
X_test = scaler.transform(X_test)
#X_train, X_eval, y_train, y_eval = train_test_split(X0_train, y0_train, test_size= 100/k_cross_validation_ratio, random_state=7)
#finding the range for the optimal value of k either within the specified range (user input)
# or by our default range
if optimal_k and min_range_k>0 and max_range_k>min_range_k:
k_range= range(min_range_k, max_range_k)
else:
k_range=range(1,50)
scores = {}
scores_list = []
#finding the optimal nb of neighbors
for k in tqdm(k_range):
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X0_train, y0_train)
y_pred = knn.predict(X_test)
scores[k] = metrics.accuracy_score(y_test, y_pred)
scores_list.append(metrics.accuracy_score(y_test, y_pred))
k_optimal = scores_list.index(max(scores_list))
model = KNeighborsClassifier(n_neighbors= k_optimal)
eval_score_list = []
#Evaluation using cross validation: lpo: leave p out
from sklearn.model_selection import StratifiedKFold
lpo = LeavePOut(p=1)
accuracys=[]
skf = StratifiedKFold(n_splits=10, random_state=None)
skf.get_n_splits(X0_train, y0_train)
for train_index, test_index in skf.split(X0_train, y0_train):
# print("TRAIN:", train_index, "Validation:", test_index)
X_train, X_eval = pd.DataFrame(X0_train).iloc[train_index], pd.DataFrame(X0_train).iloc[test_index]
y_train, y_eval = pd.DataFrame(y0_train).iloc[train_index], pd.DataFrame(y0_train).iloc[test_index]
model.fit(X0_train, y0_train)
predictions = model.predict(X_eval)
score = accuracy_score(predictions, y_eval)
accuracys.append(score)
#scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')
#eval_score_list.append(scores.mean())
#eval_accuracy = np.mean(eval_score_list)
eval_accuracy = np.mean(accuracys)
#save the pretrained model:
model_name='pretrained_knn_model'
pickle.dump(model, open(model_name, 'wb'))
return eval_accuracy, model, X0_train, y0_train, X_test, y_test
def test(X_train, y_train, X_test, y_test,pretrain_model=False):
model_name='pretrained_knn_model'
if pretrain_model:
model = pickle.load(open(model_name, 'rb' ))
else:
eval_score, model, X_train, y_train, X_test, y_test = train(X_test, y_test, pretrained_model=False)
print("Evaluation score: {}".format(eval_score))
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Predictions shape: {}".format(y_pred.shape))
print("Y_test shape: {}".format(y_test))
classification_rep = classification_report(y_test, y_pred)
test_score = metrics.accuracy_score(y_test, y_pred)
return test_score, classification_rep
# if __name__ == '__main__':
path = './dataset/male' ## TODO: Change this to the path of your dataset. (The code will look through every subfolder for images)
images = []
for x, y, z in os.walk(path):
for name in tqdm(z):
images.append(os.path.join(x, name).replace('\\','/'))
lbp_df = pd.DataFrame()
# the parameters of the LBP algo
# higher = more time required
sample_points = 16
radius = 4
# this code takes a while
images_crop = []
# this code takes a while
count_empty=0
for i in tqdm(images):
if ".DS_Store" in i:
continue
img = cv2.imread(i)
faces, x,y,w,h = locateFace(img)
if not faces == ():
crop_img = img[y:y+h, x:x+w]
lbp = LocalBinaryPatterns(sample_points, radius).describe(cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY))
row = dict(zip(range(0, len(lbp)), lbp))
row['ageRange'] = i.split('/')[3] ## TODO: change 3 to the index in the path where the age range is located
## for example, mine was ../dataset/female/age_10_14/imagename => split by / => index 3
lbp_df = lbp_df.append(row, ignore_index=True)
else:
# profile_face, x,y,w,h = locateProfile(img)
# if not profile_face == ():
# crop_img = img[y:y+h, x:x+w]
# lbp = LocalBinaryPatterns(sample_points, radius).describe(cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY))
# row = dict(zip(range(0, len(lbp)), lbp))
# row['ageRange'] = i.split('/')[3] ## TODO: change 3 to the index in the path where the age range is located
# ## for example, mine was ../dataset/female/age_10_14/imagename => split by / => index 3
# lbp_df = lbp_df.append(row, ignore_index=True)
# else:
# eyes_frontal, x,y,w,h = locateEyes(img)
# if not eyes_frontal == ():
# crop_img = img[y:y+h, x:x+w]
# lbp = LocalBinaryPatterns(sample_points, radius).describe(cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY))
# row = dict(zip(range(0, len(lbp)), lbp))
# row['ageRange'] = i.split('/')[3] ## TODO: change 3 to the index in the path where the age range is located
# ## for example, mine was ../dataset/female/age_10_14/imagename => split by / => index 3
# lbp_df = lbp_df.append(row, ignore_index=True)
# #neither a frontal view nor a profile view /eyes of the face
# else:
count_empty = count_empty+1
print('Number of not found faces:', count_empty)
# TODO: DELETE UNNECESSARY LINES
# number of null values in our df. Should always be 0
# lbp_df[2].isna().sum()
# corrM = lbp_df.corr()
# print(corrM)
# the age groups we decide we call 'young'
young = ['age_10_14',
'age_15_19',
'age_20_24']
# in this column, true means young, false means old
lbp_df['age_new'] = lbp_df['ageRange'].isin(young)
# lbp_df.head()
# randomize the df so that old and young are mixed
random_df = lbp_df.sample(frac=1).reset_index(drop=True)
# random_df.head()
X = random_df.drop(['ageRange','age_new'], axis=1)
y = random_df['age_new']
eval_accuracy, model, X_train, y_train, X_test, y_test = train(X, y, k_cross_validation_ratio=5, testing_size=0.2, optimal_k=True, min_range_k= 1, max_range_k=100)
test_score, conf_rep = test(X_train, y_train,X_test, y_test, pretrain_model=True)
print("Evaluation Score: {}".format(eval_accuracy))
print("Test Score: {}".format(test_score))
print(conf_rep)
| avg_line_length: 33.47351 | max_line_length: 163 | alphanum_fraction: 0.66792 |

| hexsha: aeb79c44fd6f9feb526e260b88c26d5e67e1afe1 | size: 380 | ext: py | lang: Python |
| path: notices/Common.py | repo: classroomtechtools/moodle_daily_notices @ 369ebe827f9c1a0aee37071b425cf3e9a09bda5c | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

# shared strings
STUDENT_NOTICES = 'STUDENT_NOTICES'
TEACHER_NOTICES = 'TEACHER_NOTICES'
FULL_CONTENT = 'Full Content'
ATTACHED_CONTENT = 'Attached Content'
SCHOOL_SECTION = 'School Section'
NEXT_DAY = "next day"
# stuff that goes in settings
ACTIVITY_NAME = 'activity_name'
SENDER = 'sender'
PRIORITY_USERNAMES = 'priority_usernames'
AGENTS = 'agents'
SEARCH_DAY = "search_day"
| avg_line_length: 25.333333 | max_line_length: 41 | alphanum_fraction: 0.784211 |

| hexsha: ff742b8b21f0d11421b5e1fdd6ee00940d1e895a | size: 2,428 | ext: py | lang: Python |
| path: TIPE/dessiner_avion2.py | repo: clementlagneau/TIPE-2019 @ 5dc8f2c4619fadad6fbc5745e0887053c81a7bfe | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 10:26:25 2018
@author: Clement LAGNEAU
"""
from copy import deepcopy
from avion import a
from strategie import strat2
from iteration import avance2
b = strat2(a)  # b is drawn below; the original left this line commented out, which raises a NameError
k=40
n=20
import tkinter
dico = {-1 : "red", 2 : "yellow",1 :"blue", 0: "white"}
score = 0
def init():
for x in range(9):
for y in range(n):
couleur[y][x]=canvas.create_rectangle((x*k, y*k,(x+1)*k, (y+1)*k), outline="gray", fill="red")
def calcul():
global score
global a
c,score=avance2(a,b,score)
for y in range(9):
for x in range(n):
canvas.itemconfig(couleur[x][y], fill=dico[c[x][y]])
a=deepcopy(c)
def final():
calcul()
fenetre.after(1000, final)
couleur = [[0 for x in range(9)] for y in range(n)]
fenetre = tkinter.Tk()
canvas = tkinter.Canvas(fenetre, width=k*n, height=k*n, highlightthickness=0)
canvas.pack()
init()
for y in range(9):
for x in range(n):
if b[x][y] == "h":
canvas.create_line(((y+0.5)*k, (x+0.5)*k, (y+0.5)*k, (x-0.5)*k),arrow='last')
if b[x][y] == "b":
canvas.create_line(((y+0.5)*k, (x+0.5)*k, (y+0.5)*k, (x+1.5)*k),arrow='last')
if b[x][y] == "d":
canvas.create_line(((y+0.5)*k, (x+0.5)*k, (y+1.5)*k, (x+0.5)*k),arrow='last')
if b[x][y] == "g":
canvas.create_line(((y+0.5)*k, (x+0.5)*k, (y-0.5)*k, (x+0.5)*k),arrow='last')
final()
fenetre.mainloop()
"""
[['s', 's', 's', 's', 'g', 's', 's', 's', 's'], ['i', 'i', 'i', 'i', 'h', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'h', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'h', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'h', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'h', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'h', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'h', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'h', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'b', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'b', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'b', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 'd', 'd', 'd', 'b', 'g', 'g', 'g', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 'i', 'i', 'i', 'b', 'i', 'i', 'i', 'i'], ['i', 's', 's', 's', 'd', 's', 's', 's', 'i']]
"""
| avg_line_length: 37.9375 | max_line_length: 940 | alphanum_fraction: 0.405684 |

| hexsha: 22e6e39008c6f97a2102bc7f7fcdde52c4bd7bb9 | size: 8,190 | ext: py | lang: Python |
| path: tools/project-creator/Python2.6.6/Lib/ctypes/test/test_pep3118.py | repo: gohopo/nineck.ca @ 9601f5ae4c20f8a3ea27b06551556fa5e1eecce3 | licenses: ["MIT"] (identical for stars/issues/forks) |
| stars: 81 (2017-03-13T08:24:01.000Z – 2021-04-02T09:48:38.000Z) | issues: 6 (2017-04-30T08:36:55.000Z – 2017-09-22T01:37:28.000Z) | forks: 41 (2017-03-18T14:11:58.000Z – 2021-04-14T05:06:09.000Z) |

import unittest
from ctypes import *
import re, struct, sys
if sys.byteorder == "little":
THIS_ENDIAN = "<"
OTHER_ENDIAN = ">"
else:
THIS_ENDIAN = ">"
OTHER_ENDIAN = "<"
class memoryview(object):
# This class creates a memoryview - like object from data returned
# by the private _ctypes._buffer_info() function, just enough for
# these tests.
#
# It can be removed when the py3k memoryview object is backported.
def __init__(self, ob):
from _ctypes import _buffer_info
self.format, self.ndim, self.shape = _buffer_info(ob)
if self.shape == ():
self.shape = None
self.itemsize = sizeof(ob)
else:
size = sizeof(ob)
for dim in self.shape:
size //= dim
self.itemsize = size
self.strides = None
self.readonly = False
self.size = sizeof(ob)
def normalize(format):
# Remove current endian specifier and white space from a format
# string
if format is None:
return ""
format = format.replace(OTHER_ENDIAN, THIS_ENDIAN)
return re.sub(r"\s", "", format)
class Test(unittest.TestCase):
def test_native_types(self):
for tp, fmt, shape, itemtp in native_types:
ob = tp()
v = memoryview(ob)
try:
self.failUnlessEqual(normalize(v.format), normalize(fmt))
self.failUnlessEqual(v.size, sizeof(ob))
self.failUnlessEqual(v.itemsize, sizeof(itemtp))
self.failUnlessEqual(v.shape, shape)
# ctypes object always have a non-strided memory block
self.failUnlessEqual(v.strides, None)
# they are always read/write
self.failIf(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.failUnlessEqual(v.itemsize * n, v.size)
except:
# so that we can see the failing type
print(tp)
raise
def test_endian_types(self):
for tp, fmt, shape, itemtp in endian_types:
ob = tp()
v = memoryview(ob)
try:
self.failUnlessEqual(v.format, fmt)
self.failUnlessEqual(v.size, sizeof(ob))
self.failUnlessEqual(v.itemsize, sizeof(itemtp))
self.failUnlessEqual(v.shape, shape)
# ctypes object always have a non-strided memory block
self.failUnlessEqual(v.strides, None)
# they are always read/write
self.failIf(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.failUnlessEqual(v.itemsize * n, v.size)
except:
# so that we can see the failing type
print(tp)
raise
# define some structure classes
class Point(Structure):
_fields_ = [("x", c_long), ("y", c_long)]
class PackedPoint(Structure):
_pack_ = 2
_fields_ = [("x", c_long), ("y", c_long)]
class Point2(Structure):
pass
Point2._fields_ = [("x", c_long), ("y", c_long)]
class EmptyStruct(Structure):
_fields_ = []
class aUnion(Union):
_fields_ = [("a", c_int)]
class Incomplete(Structure):
pass
class Complete(Structure):
pass
PComplete = POINTER(Complete)
Complete._fields_ = [("a", c_long)]
################################################################
#
# This table contains format strings as they look on little endian
# machines. The test replaces '<' with '>' on big endian machines.
#
native_types = [
# type format shape calc itemsize
## simple types
(c_char, "<c", None, c_char),
(c_byte, "<b", None, c_byte),
(c_ubyte, "<B", None, c_ubyte),
(c_short, "<h", None, c_short),
(c_ushort, "<H", None, c_ushort),
# c_int and c_uint may be aliases to c_long
#(c_int, "<i", None, c_int),
#(c_uint, "<I", None, c_uint),
(c_long, "<l", None, c_long),
(c_ulong, "<L", None, c_ulong),
# c_longlong and c_ulonglong are aliases on 64-bit platforms
#(c_longlong, "<q", None, c_longlong),
#(c_ulonglong, "<Q", None, c_ulonglong),
(c_float, "<f", None, c_float),
(c_double, "<d", None, c_double),
# c_longdouble may be an alias to c_double
(c_bool, "<?", None, c_bool),
(py_object, "<O", None, py_object),
## pointers
(POINTER(c_byte), "&<b", None, POINTER(c_byte)),
(POINTER(POINTER(c_long)), "&&<l", None, POINTER(POINTER(c_long))),
## arrays and pointers
(c_double * 4, "(4)<d", (4,), c_double),
(c_float * 4 * 3 * 2, "(2,3,4)<f", (2,3,4), c_float),
(POINTER(c_short) * 2, "(2)&<h", (2,), POINTER(c_short)),
(POINTER(c_short) * 2 * 3, "(3,2)&<h", (3,2,), POINTER(c_short)),
(POINTER(c_short * 2), "&(2)<h", None, POINTER(c_short)),
## structures and unions
(Point, "T{<l:x:<l:y:}", None, Point),
# packed structures do not implement the pep
(PackedPoint, "B", None, PackedPoint),
(Point2, "T{<l:x:<l:y:}", None, Point2),
(EmptyStruct, "T{}", None, EmptyStruct),
# the pep doesn't support unions
(aUnion, "B", None, aUnion),
## pointer to incomplete structure
(Incomplete, "B", None, Incomplete),
(POINTER(Incomplete), "&B", None, POINTER(Incomplete)),
# 'Complete' is a structure that starts incomplete, but is completed after the
# pointer type to it has been created.
(Complete, "T{<l:a:}", None, Complete),
# Unfortunately the pointer format string is not fixed...
(POINTER(Complete), "&B", None, POINTER(Complete)),
## other
# function signatures are not implemented
(CFUNCTYPE(None), "X{}", None, CFUNCTYPE(None)),
]
class BEPoint(BigEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
class LEPoint(LittleEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
################################################################
#
# This table contains format strings as they really look, on both big
# and little endian machines.
#
endian_types = [
(BEPoint, "T{>l:x:>l:y:}", None, BEPoint),
(LEPoint, "T{<l:x:<l:y:}", None, LEPoint),
(POINTER(BEPoint), "&T{>l:x:>l:y:}", None, POINTER(BEPoint)),
(POINTER(LEPoint), "&T{<l:x:<l:y:}", None, POINTER(LEPoint)),
]
if __name__ == "__main__":
unittest.main()
| avg_line_length: 38.632075 | max_line_length: 99 | alphanum_fraction: 0.445055 |

| hexsha: 35cbebcf0ae4cb51b94dd580eb132e05d1c51124 | size: 60,838 | ext: py | lang: Python |
| path: scipy/linalg/tests/test_basic.py | repo: ikamensh/scipy @ d645404be21b7c0b1e7ba24bf8d525b624aeb848 | licenses: ["BSD-3-Clause"] (identical for stars/issues/forks) |
| stars: null | issues: null | forks: null |

import warnings
import itertools
import numpy as np
from numpy import (arange, array, dot, zeros, identity, conjugate, transpose,
float32)
import numpy.linalg as linalg
from numpy.random import random
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_array_almost_equal, assert_allclose,
assert_array_equal, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,
solve_banded, solveh_banded, solve_triangular,
solve_circulant, circulant, LinAlgError, block_diag,
matrix_balance, LinAlgWarning)
from scipy.linalg._testutils import assert_no_overwrite
from scipy._lib._testutils import check_free_memory
from scipy.linalg.blas import HAS_ILP64
REAL_DTYPES = [np.float32, np.float64, np.longdouble]
COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def _eps_cast(dtyp):
"""Get the epsilon for dtype, possibly downcast to BLAS types."""
dt = dtyp
if dt == np.longdouble:
dt = np.float64
elif dt == np.clongdouble:
dt = np.complex128
return np.finfo(dt).eps
class TestSolveBanded(object):
def test_real(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l, u = 2, 1
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1, 1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_complex(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2j, 1, 20, 2j],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2j, -1, 0, 0]])
l, u = 2, 1
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1, 1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1j],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_real(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
ab[2, :-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1, 1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_complex(self):
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag(
ab[2, :-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1, 1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l, u = 2, 1
b4 = array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b4, check_finite=False)
assert_array_almost_equal(dot(a, x), b4)
def test_bad_shape(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l, u = 2, 1
bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4)
assert_raises(ValueError, solve_banded, (l, u), ab, bad)
assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
# Values of (l,u) are not compatible with ab.
assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
def test_1x1(self):
b = array([[1., 2., 3.]])
x = solve_banded((1, 1), [[0], [2], [0]], b)
assert_array_equal(x, [[0.5, 1.0, 1.5]])
assert_equal(x.dtype, np.dtype('f8'))
assert_array_equal(b, [[1.0, 2.0, 3.0]])
def test_native_list_arguments(self):
a = [[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]]
ab = [[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]]
l, u = 2, 1
b = [10.0, 0.0, 2.0, 14.0]
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
class TestSolveHBanded(object):
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (3,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_float32(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_float32(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_complex(self):
# Solve
# [ 4 -j 2 0] [2-j]
# [ j 4 -j 2] X = [4-j]
# [ 2 j 4 -j] [4+j]
# [ 0 2 j 4] [2+j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
def test_02_complex(self):
# Solve
# [ 4 -j 2 0] [2-j 2+4j]
# [ j 4 -j 2] X = [4-j -1-j]
# [ 2 j 4 -j] [4+j 4+2j]
# [ 0 2 j 4] [2+j j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([[2-1j, 2+4j],
[4.0-1j, -1-1j],
[4.0+1j, 4+2j],
[2+1j, 1j]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_upper(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_03_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 2D array with shape (3,1).
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))
def test_tridiag_01_lower(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_lower(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_float32(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_float32(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_complex(self):
# Solve
# [ 4 -j 0] [ -j]
# [ j 4 -j] X = [4-j]
# [ 0 j 4] [4+j]
#
ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
b = array([-1.0j, 4.0-1j, 4+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0])
def test_tridiag_02_complex(self):
# Solve
# [ 4 -j 0] [ -j 4j]
# [ j 4 -j] X = [4-j -1-j]
# [ 0 j 4] [4+j 4 ]
#
ab = array([[-99, -1.0j, -1.0j],
[4.0, 4.0, 4.0]])
b = array([[-1j, 4.0j],
[4.0-1j, -1.0-1j],
[4.0+1j, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_check_finite(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, check_finite=False)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_bad_shapes(self):
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0]])
assert_raises(ValueError, solveh_banded, ab, b)
assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
assert_raises(ValueError, solveh_banded, ab, [1.0])
def test_1x1(self):
x = solveh_banded([[1]], [[1, 2, 3]])
assert_array_equal(x, [[1.0, 2.0, 3.0]])
assert_equal(x.dtype, np.dtype('f8'))
def test_native_list_arguments(self):
# Same as test_01_upper, using python's native list.
ab = [[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]]
b = [1.0, 4.0, 1.0, 2.0]
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
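# Note on the storage used above (not from the original file): solveh_banded
# takes only one triangle of the symmetric/Hermitian band matrix.  In the
# default upper form, ab[u + i - j, j] == a[i, j] for j - u <= i <= j, so the
# main diagonal sits in the last row of ab; with lower=True,
# ab[i - j, j] == a[i, j] for j <= i <= j + l and the main diagonal sits in the
# first row.  The -99 entries mark positions that the routine never reads.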
class TestSolve(object):
def setup_method(self):
np.random.seed(1234)
def test_20Feb04_bug(self):
a = [[1, 1], [1.0, 0]] # ok
x0 = solve(a, [1, 0j])
assert_array_almost_equal(dot(a, x0), [1, 0])
# gives failure with clapack.zgesv(..,rowmajor=0)
a = [[1, 1], [1.2, 0]]
b = [1, 0j]
x0 = solve(a, b)
assert_array_almost_equal(dot(a, x0), [1, 0])
def test_simple(self):
a = [[1, 20], [-30, 4]]
for b in ([[1, 0], [0, 1]], [1, 0],
[[2, 1], [-30, 4]]):
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_simple_sym(self):
a = [[2, 3], [3, 5]]
for lower in [0, 1]:
for b in ([[1, 0], [0, 1]], [1, 0]):
x = solve(a, b, sym_pos=1, lower=lower)
assert_array_almost_equal(dot(a, x), b)
def test_simple_sym_complex(self):
a = [[5, 2], [2, 4]]
for b in [[1j, 0],
[[1j, 1j],
[0, 2]],
]:
x = solve(a, b, sym_pos=1)
assert_array_almost_equal(dot(a, x), b)
def test_simple_complex(self):
a = array([[5, 2], [2j, 4]], 'D')
for b in [[1j, 0],
[[1j, 1j],
[0, 2]],
[1, 0j],
array([1, 0], 'D'),
]:
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_nils_20Feb04(self):
n = 2
A = random([n, n])+random([n, n])*1j
X = zeros((n, n), 'D')
Ainv = inv(A)
R = identity(n)+identity(n)*0j
for i in arange(0, n):
r = R[:, i]
X[:, i] = solve(A, r)
assert_array_almost_equal(X, Ainv)
def test_random(self):
n = 20
a = random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
for i in range(4):
b = random([n, 3])
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_random_complex(self):
n = 20
a = random([n, n]) + 1j * random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
for i in range(2):
b = random([n, 3])
x = solve(a, b)
assert_array_almost_equal(dot(a, x), b)
def test_random_sym(self):
n = 20
a = random([n, n])
for i in range(n):
a[i, i] = abs(20*(.1+a[i, i]))
for j in range(i):
a[i, j] = a[j, i]
for i in range(4):
b = random([n])
x = solve(a, b, sym_pos=1)
assert_array_almost_equal(dot(a, x), b)
def test_random_sym_complex(self):
n = 20
a = random([n, n])
a = a + 1j*random([n, n])
for i in range(n):
a[i, i] = abs(20*(.1+a[i, i]))
for j in range(i):
a[i, j] = conjugate(a[j, i])
b = random([n])+2j*random([n])
for i in range(2):
x = solve(a, b, sym_pos=1)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = [[1, 20], [-30, 4]]
for b in ([[1, 0], [0, 1]], [1, 0],
[[2, 1], [-30, 4]]):
x = solve(a, b, check_finite=False)
assert_array_almost_equal(dot(a, x), b)
def test_scalar_a_and_1D_b(self):
a = 1
b = [1, 2, 3]
x = solve(a, b)
assert_array_almost_equal(x.ravel(), b)
assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape')
def test_simple2(self):
a = np.array([[1.80, 2.88, 2.05, -0.89],
[525.00, -295.00, -95.00, -380.00],
[1.58, -2.69, -2.90, -1.04],
[-1.11, -0.66, -0.59, 0.80]])
b = np.array([[9.52, 18.47],
[2435.00, 225.00],
[0.77, -13.28],
[-6.22, -6.21]])
x = solve(a, b)
assert_array_almost_equal(x, np.array([[1., -1, 3, -5],
[3, 2, 4, 1]]).T)
def test_simple_complex2(self):
a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j],
[-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j],
[-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j],
[2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]])
b = np.array([[26.26+51.78j, 31.32-6.70j],
[64.30-86.80j, 158.60-14.20j],
[-5.75+25.31j, -2.15+30.19j],
[1.16+2.57j, -2.56+7.55j]])
x = solve(a, b)
        assert_array_almost_equal(x, np.array([[1+1.j, -1-2.j],
[2-3.j, 5+1.j],
[-4-5.j, -3+4.j],
[6.j, 2-3.j]]))
def test_hermitian(self):
# An upper triangular matrix will be used for hermitian matrix a
a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j],
[0, -4.63, -1.84+0.03j, 2.21+0.21j],
[0, 0, -8.87, 1.58-0.90j],
[0, 0, 0, -1.36]])
b = np.array([[2.98-10.18j, 28.68-39.89j],
[-9.58+3.88j, -24.79-8.40j],
[-0.77-16.05j, 4.23-70.02j],
[7.79+5.48j, -35.39+18.01j]])
res = np.array([[2.+1j, -8+6j],
[3.-2j, 7-2j],
[-1+2j, -1+5j],
[1.-1j, 3-4j]])
x = solve(a, b, assume_a='her')
assert_array_almost_equal(x, res)
# Also conjugate a and test for lower triangular data
x = solve(a.conj().T, b, assume_a='her', lower=True)
assert_array_almost_equal(x, res)
def test_pos_and_sym(self):
A = np.arange(1, 10).reshape(3, 3)
x = solve(np.tril(A)/9, np.ones(3), assume_a='pos')
assert_array_almost_equal(x, [9., 1.8, 1.])
x = solve(np.tril(A)/9, np.ones(3), assume_a='sym')
assert_array_almost_equal(x, [9., 1.8, 1.])
def test_singularity(self):
a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1]])
b = np.arange(9)[:, None]
assert_raises(LinAlgError, solve, a, b)
def test_ill_condition_warning(self):
a = np.array([[1, 1], [1+1e-16, 1-1e-16]])
b = np.ones(2)
with warnings.catch_warnings():
warnings.simplefilter('error')
assert_raises(LinAlgWarning, solve, a, b)
def test_empty_rhs(self):
a = np.eye(2)
b = [[], []]
x = solve(a, b)
assert_(x.size == 0, 'Returned array is not empty')
assert_(x.shape == (2, 0), 'Returned empty array shape is wrong')
def test_multiple_rhs(self):
a = np.eye(2)
b = np.random.rand(2, 3, 4)
x = solve(a, b)
assert_array_almost_equal(x, b)
def test_transposed_keyword(self):
A = np.arange(9).reshape(3, 3) + 1
x = solve(np.tril(A)/9, np.ones(3), transposed=True)
assert_array_almost_equal(x, [1.2, 0.2, 1])
x = solve(np.tril(A)/9, np.ones(3), transposed=False)
assert_array_almost_equal(x, [9, -5.4, -1.2])
def test_transposed_notimplemented(self):
a = np.eye(3).astype(complex)
with assert_raises(NotImplementedError):
solve(a, a, transposed=True)
def test_nonsquare_a(self):
assert_raises(ValueError, solve, [1, 2], 1)
def test_size_mismatch_with_1D_b(self):
assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3))
assert_raises(ValueError, solve, np.eye(3), np.ones(4))
def test_assume_a_keyword(self):
assert_raises(ValueError, solve, 1, 1, assume_a='zxcv')
@pytest.mark.skip(reason="Failure on OS X (gh-7500), "
"crash on Windows (gh-8064)")
def test_all_type_size_routine_combinations(self):
sizes = [10, 100]
assume_as = ['gen', 'sym', 'pos', 'her']
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for size, assume_a, dtype in itertools.product(sizes, assume_as,
dtypes):
is_complex = dtype in (np.complex64, np.complex128)
if assume_a == 'her' and not is_complex:
continue
err_msg = ("Failed for size: {}, assume_a: {},"
"dtype: {}".format(size, assume_a, dtype))
a = np.random.randn(size, size).astype(dtype)
b = np.random.randn(size).astype(dtype)
if is_complex:
a = a + (1j*np.random.randn(size, size)).astype(dtype)
if assume_a == 'sym': # Can still be complex but only symmetric
a = a + a.T
elif assume_a == 'her': # Handle hermitian matrices here instead
a = a + a.T.conj()
elif assume_a == 'pos':
a = a.conj().T.dot(a) + 0.1*np.eye(size)
tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6
if assume_a in ['gen', 'sym', 'her']:
# We revert the tolerance from before
# 4b4a6e7c34fa4060533db38f9a819b98fa81476c
if dtype in (np.float32, np.complex64):
tol *= 10
x = solve(a, b, assume_a=assume_a)
assert_allclose(a.dot(x), b,
atol=tol * size,
rtol=tol * size,
err_msg=err_msg)
if assume_a == 'sym' and dtype not in (np.complex64,
np.complex128):
x = solve(a, b, assume_a=assume_a, transposed=True)
assert_allclose(a.dot(x), b,
atol=tol * size,
rtol=tol * size,
err_msg=err_msg)
class TestSolveTriangular(object):
def test_simple(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1, 0], [1, 2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True)
assert_array_almost_equal(sol, [1, 0])
# check that it works also for non-contiguous matrices
sol = solve_triangular(A.T, b, lower=False)
assert_array_almost_equal(sol, [.5, .5])
# and that it gives the same result as trans=1
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [.5, .5])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
def test_simple_complex(self):
"""
solve_triangular on a simple 2x2 complex matrix
"""
A = array([[1+1j, 0], [1j, 2]])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
# check other option combinations with complex rhs
b = np.diag([1+1j, 1+2j])
sol = solve_triangular(A, b, lower=True, trans=0)
assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
sol = solve_triangular(A, b, lower=True, trans=2)
assert_array_almost_equal(sol, [[1j, -0.75-0.25j], [0, 0.5+1j]])
sol = solve_triangular(A.T, b, lower=False, trans=0)
assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]])
sol = solve_triangular(A.T, b, lower=False, trans=1)
assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]])
sol = solve_triangular(A.T, b, lower=False, trans=2)
assert_array_almost_equal(sol, [[1j, 0], [-0.5, 0.5+1j]])
def test_check_finite(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1, 0], [1, 2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True, check_finite=False)
assert_array_almost_equal(sol, [1, 0])
class TestInv(object):
def setup_method(self):
np.random.seed(1234)
def test_simple(self):
a = [[1, 2], [3, 4]]
a_inv = inv(a)
assert_array_almost_equal(dot(a, a_inv), np.eye(2))
a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]]
a_inv = inv(a)
assert_array_almost_equal(dot(a, a_inv), np.eye(3))
def test_random(self):
n = 20
for i in range(4):
a = random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
a_inv = inv(a)
assert_array_almost_equal(dot(a, a_inv),
identity(n))
def test_simple_complex(self):
a = [[1, 2], [3, 4j]]
a_inv = inv(a)
assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
def test_random_complex(self):
n = 20
for i in range(4):
a = random([n, n])+2j*random([n, n])
for i in range(n):
a[i, i] = 20*(.1+a[i, i])
a_inv = inv(a)
assert_array_almost_equal(dot(a, a_inv),
identity(n))
def test_check_finite(self):
a = [[1, 2], [3, 4]]
a_inv = inv(a, check_finite=False)
assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]])
class TestDet(object):
def setup_method(self):
np.random.seed(1234)
def test_simple(self):
a = [[1, 2], [3, 4]]
a_det = det(a)
assert_almost_equal(a_det, -2.0)
def test_simple_complex(self):
a = [[1, 2], [3, 4j]]
a_det = det(a)
assert_almost_equal(a_det, -6+4j)
def test_random(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n, n])
d1 = det(a)
d2 = basic_det(a)
assert_almost_equal(d1, d2)
def test_random_complex(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n, n]) + 2j*random([n, n])
d1 = det(a)
d2 = basic_det(a)
assert_allclose(d1, d2, rtol=1e-13)
def test_check_finite(self):
a = [[1, 2], [3, 4]]
a_det = det(a, check_finite=False)
assert_almost_equal(a_det, -2.0)
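# direct_lstsq (below) computes the least-squares solution by forming and
# solving the normal equations A^H A x = A^H b; the overdetermined TestLstsq
# cases use it as an independent reference for the LAPACK-backed lstsq drivers.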
def direct_lstsq(a, b, cmplx=0):
at = transpose(a)
if cmplx:
at = conjugate(at)
a1 = dot(at, a)
b1 = dot(at, b)
return solve(a1, b1)
class TestLstsq(object):
lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)
def setup_method(self):
np.random.seed(1234)
def test_simple_exact(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 20], [-30, 4]], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
for bt in (((1, 0), (0, 1)), (1, 0),
((2, 1), (-30, 4))):
# Store values in case they are overwritten
# later
a1 = a.copy()
b = np.array(bt, dtype=dtype)
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2,
f'expected efficient rank 2, got {r}')
assert_allclose(dot(a, x), b,
atol=25 * _eps_cast(a1.dtype),
rtol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_overdet(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)
b = np.array([1, 2, 3], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
if lapack_driver == 'gelsy':
residuals = np.sum((b - a.dot(x))**2)
else:
residuals = out[1]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
residuals,
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
assert_allclose(x, (-0.428571428571429, 0.85714285714285),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_overdet_complex(self):
for dtype in COMPLEX_DTYPES:
a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)
b = np.array([1, 2+4j, 3], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
if lapack_driver == 'gelsy':
res = b - a.dot(x)
residuals = np.sum(res * res.conj())
else:
residuals = out[1]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
residuals,
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
assert_allclose(
x, (-0.4831460674157303 + 0.258426966292135j,
0.921348314606741 + 0.292134831460674j),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_underdet(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
b = np.array([1, 2], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(x, (-0.055555555555555, 0.111111111111111,
0.277777777777777),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_exact(self):
for dtype in REAL_DTYPES:
for n in (20, 200):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(random([n, n]), dtype=dtype)
for i in range(n):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(4):
b = np.asarray(random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == n, 'expected efficient rank %s, '
'got %s' % (n, r))
if dtype is np.float32:
assert_allclose(
dot(a, x), b,
rtol=500 * _eps_cast(a1.dtype),
atol=500 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
else:
assert_allclose(
dot(a, x), b,
rtol=1000 * _eps_cast(a1.dtype),
atol=1000 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_complex_exact(self):
for dtype in COMPLEX_DTYPES:
for n in (20, 200):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(random([n, n]) + 1j*random([n, n]),
dtype=dtype)
for i in range(n):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(2):
b = np.asarray(random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == n, 'expected efficient rank %s, '
'got %s' % (n, r))
if dtype is np.complex64:
assert_allclose(
dot(a, x), b,
rtol=400 * _eps_cast(a1.dtype),
atol=400 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
else:
assert_allclose(
dot(a, x), b,
rtol=1000 * _eps_cast(a1.dtype),
atol=1000 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_overdet(self):
for dtype in REAL_DTYPES:
for (n, m) in ((20, 15), (200, 2)):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(random([n, m]), dtype=dtype)
for i in range(m):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(4):
b = np.asarray(random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == m, 'expected efficient rank %s, '
'got %s' % (m, r))
assert_allclose(
x, direct_lstsq(a, b, cmplx=0),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_complex_overdet(self):
for dtype in COMPLEX_DTYPES:
for (n, m) in ((20, 15), (200, 2)):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(random([n, m]) + 1j*random([n, m]),
dtype=dtype)
for i in range(m):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(2):
b = np.asarray(random([n, 3]), dtype=dtype)
# Store values in case they are overwritten
# later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == m, 'expected efficient rank %s, '
'got %s' % (m, r))
assert_allclose(
x, direct_lstsq(a, b, cmplx=1),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_check_finite(self):
with suppress_warnings() as sup:
            # On (some) OSX this test triggers a warning (gh-7538)
sup.filter(RuntimeWarning,
"internal gelsd driver lwork query error,.*"
"Falling back to 'gelss' driver.")
at = np.array(((1, 20), (-30, 4)))
for dtype, bt, lapack_driver, overwrite, check_finite in \
itertools.product(REAL_DTYPES,
(((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))),
TestLstsq.lapack_drivers,
(True, False),
(True, False)):
a = at.astype(dtype)
b = np.array(bt, dtype=dtype)
# Store values in case they are overwritten
# later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
check_finite=check_finite, overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(dot(a, x), b,
rtol=25 * _eps_cast(a.dtype),
atol=25 * _eps_cast(a.dtype),
err_msg=f"driver: {lapack_driver}")
def test_zero_size(self):
for a_shape, b_shape in (((0, 2), (0,)),
((0, 4), (0, 2)),
((4, 0), (4,)),
((4, 0), (4, 2))):
b = np.ones(b_shape)
x, residues, rank, s = lstsq(np.zeros(a_shape), b)
assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))
residues_should_be = (np.empty((0,)) if a_shape[1]
else np.linalg.norm(b, axis=0)**2)
assert_equal(residues, residues_should_be)
assert_(rank == 0, 'expected rank 0')
assert_equal(s, np.empty((0,)))
class TestPinv(object):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a_pinv = pinv(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
dtype=float))
a_pinv = pinv(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
def test_simple_singular(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv, a_pinv2)
def test_simple_cols(self):
a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv, a_pinv2)
def test_simple_rows(self):
a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv, a_pinv2)
def test_check_finite(self):
a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]])
a_pinv = pinv(a, check_finite=False)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
a_pinv = pinv2(a, check_finite=False)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
def test_native_list_argument(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv, a_pinv2)
def test_tall_transposed(self):
a = random([10, 2])
a_pinv = pinv(a)
# The result will be transposed internally hence will be a C-layout
# instead of the typical LAPACK output with Fortran-layout
assert a_pinv.flags['C_CONTIGUOUS']
class TestPinvSymmetric(object):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_nonpositive(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_array_almost_equal(a_pinv, a_pinvh)
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
dtype=float))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_native_list_argument(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a.tolist())
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinv_pinv2_comparison(): # As reported in gh-8861
I_6 = np.eye(6)
Ts = np.diag([-1] * 4 + [-2], k=-1) + np.diag([-2] + [-1] * 4, k=1)
T = I_6 + Ts
A = 25 * (np.kron(I_6, T) + np.kron(Ts, I_6))
Ap, Ap2 = pinv(A), pinv2(A)
tol = 1e-11
assert_allclose(A @ Ap @ A - A, A @ Ap2 @ A - A, rtol=0., atol=tol)
assert_allclose(Ap @ A @ Ap - Ap, Ap2 @ A @ Ap2 - Ap2, rtol=0., atol=tol)
@pytest.mark.parametrize('scale', (1e-20, 1., 1e20))
@pytest.mark.parametrize('pinv_', (pinv, pinvh, pinv2))
def test_auto_rcond(scale, pinv_):
x = np.array([[1, 0], [0, 1e-10]]) * scale
expected = np.diag(1. / np.diag(x))
x_inv = pinv_(x)
assert_allclose(x_inv, expected)
class TestVectorNorms(object):
def test_types(self):
for dtype in np.typecodes['AllFloat']:
x = np.array([1, 2, 3], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
for dtype in np.typecodes['Complex']:
x = np.array([1j, 2j, 3j], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
def test_overflow(self):
# unlike numpy's norm, this one is
# safer on overflow
a = array([1e20], dtype=float32)
assert_almost_equal(norm(a), a)
def test_stable(self):
# more stable than numpy's norm
a = array([1e4] + [1]*10000, dtype=float32)
try:
# snrm in double precision; we obtain the same as for float64
# -- large atol needed due to varying blas implementations
assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
except AssertionError:
# snrm implemented in single precision, == np.linalg.norm result
msg = ": Result should equal either 0.0 or 0.5 (depending on " \
"implementation of snrm2)."
assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
def test_zero_norm(self):
assert_equal(norm([1, 0, 3], 0), 2)
assert_equal(norm([1, 2, 3], 0), 3)
def test_axis_kwd(self):
a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2)
assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2)
def test_keepdims_kwd(self):
a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
b = norm(a, axis=1, keepdims=True)
assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2)
assert_(b.shape == (2, 1, 2))
assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2)
@pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required")
def test_large_vector(self):
check_free_memory(free_mb=17000)
x = np.zeros([2**31], dtype=np.float64)
x[-1] = 1
res = norm(x)
del x
assert_allclose(res, 1.0)
class TestMatrixNorms(object):
def test_matrix_norms(self):
# Not all of these are matrix norms in the most technical sense.
np.random.seed(1234)
for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4):
for t in np.single, np.double, np.csingle, np.cdouble, np.int64:
A = 10 * np.random.randn(n, m).astype(t)
if np.issubdtype(A.dtype, np.complexfloating):
A = (A + 10j * np.random.randn(n, m)).astype(t)
t_high = np.cdouble
else:
t_high = np.double
for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf):
actual = norm(A, ord=order)
desired = np.linalg.norm(A, ord=order)
# SciPy may return higher precision matrix norms.
# This is a consequence of using LAPACK.
if not np.allclose(actual, desired):
desired = np.linalg.norm(A.astype(t_high), ord=order)
assert_allclose(actual, desired)
def test_axis_kwd(self):
a = np.array([[[2, 1], [3, 4]]] * 2, 'd')
b = norm(a, ord=np.inf, axis=(1, 0))
c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1))
d = norm(a, ord=1, axis=(0, 1))
assert_allclose(b, c)
assert_allclose(c, d)
assert_allclose(b, d)
assert_(b.shape == c.shape == d.shape)
b = norm(a, ord=1, axis=(1, 0))
c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1))
d = norm(a, ord=np.inf, axis=(0, 1))
assert_allclose(b, c)
assert_allclose(c, d)
assert_allclose(b, d)
assert_(b.shape == c.shape == d.shape)
def test_keepdims_kwd(self):
a = np.arange(120, dtype='d').reshape(2, 3, 4, 5)
b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True)
c = norm(a, ord=1, axis=(0, 1), keepdims=True)
assert_allclose(b, c)
assert_(b.shape == c.shape)
class TestOverwrite(object):
def test_solve(self):
assert_no_overwrite(solve, [(3, 3), (3,)])
def test_solve_triangular(self):
assert_no_overwrite(solve_triangular, [(3, 3), (3,)])
def test_solve_banded(self):
assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b),
[(4, 6), (6,)])
def test_solveh_banded(self):
assert_no_overwrite(solveh_banded, [(2, 6), (6,)])
def test_inv(self):
assert_no_overwrite(inv, [(3, 3)])
def test_det(self):
assert_no_overwrite(det, [(3, 3)])
def test_lstsq(self):
assert_no_overwrite(lstsq, [(3, 2), (3,)])
def test_pinv(self):
assert_no_overwrite(pinv, [(3, 3)])
def test_pinv2(self):
assert_no_overwrite(pinv2, [(3, 3)])
def test_pinvh(self):
assert_no_overwrite(pinvh, [(3, 3)])
class TestSolveCirculant(object):
def test_basic1(self):
c = np.array([1, 2, 3, 5])
b = np.array([1, -1, 1, 0])
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_basic2(self):
# b is a 2-d matrix.
c = np.array([1, 2, -3, -5])
b = np.arange(12).reshape(4, 3)
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_basic3(self):
# b is a 3-d matrix.
c = np.array([1, 2, -3, -5])
b = np.arange(24).reshape(4, 3, 2)
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_complex(self):
# Complex b and c
c = np.array([1+2j, -3, 4j, 5])
b = np.arange(8).reshape(4, 2) + 0.5j
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_random_b_and_c(self):
# Random b and c
np.random.seed(54321)
c = np.random.randn(50)
b = np.random.randn(50)
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
def test_singular(self):
# c gives a singular circulant matrix.
c = np.array([1, 1, 0, 0])
b = np.array([1, 2, 3, 4])
x = solve_circulant(c, b, singular='lstsq')
y, res, rnk, s = lstsq(circulant(c), b)
assert_allclose(x, y)
assert_raises(LinAlgError, solve_circulant, x, y)
def test_axis_args(self):
# Test use of caxis, baxis and outaxis.
# c has shape (2, 1, 4)
c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]])
# b has shape (3, 4)
b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]])
x = solve_circulant(c, b, baxis=1)
assert_equal(x.shape, (4, 2, 3))
expected = np.empty_like(x)
expected[:, 0, :] = solve(circulant(c[0]), b.T)
expected[:, 1, :] = solve(circulant(c[1]), b.T)
assert_allclose(x, expected)
x = solve_circulant(c, b, baxis=1, outaxis=-1)
assert_equal(x.shape, (2, 3, 4))
assert_allclose(np.rollaxis(x, -1), expected)
# np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3).
x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1)
assert_equal(x.shape, (4, 2, 3))
assert_allclose(x, expected)
def test_native_list_arguments(self):
# Same as test_basic1 using python's native list.
c = [1, 2, 3, 5]
b = [1, -1, 1, 0]
x = solve_circulant(c, b)
y = solve(circulant(c), b)
assert_allclose(x, y)
class TestMatrix_Balance(object):
def test_string_arg(self):
assert_raises(ValueError, matrix_balance, 'Some string for fail')
def test_infnan_arg(self):
assert_raises(ValueError, matrix_balance,
np.array([[1, 2], [3, np.inf]]))
assert_raises(ValueError, matrix_balance,
np.array([[1, 2], [3, np.nan]]))
def test_scaling(self):
_, y = matrix_balance(np.array([[1000, 1], [1000, 0]]))
# Pre/post LAPACK 3.5.0 gives the same result up to an offset
# since in each case col norm is x1000 greater and
# 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5.
assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5)
def test_scaling_order(self):
A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]])
x, y = matrix_balance(A)
assert_allclose(solve(y, A).dot(y), x)
def test_separate(self):
_, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]),
separate=1)
assert_equal(int(np.diff(np.log2(y))), 5)
assert_allclose(z, np.arange(2))
def test_permutation(self):
A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))),
np.ones((3, 3)))
x, (y, z) = matrix_balance(A, separate=1)
assert_allclose(y, np.ones_like(y))
assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2]))
def test_perm_and_scaling(self):
# Matrix with its diagonal removed
cases = ( # Case 0
np.array([[0., 0., 0., 0., 0.000002],
[0., 0., 0., 0., 0.],
[2., 2., 0., 0., 0.],
[2., 2., 0., 0., 0.],
[0., 0., 0.000002, 0., 0.]]),
# Case 1 user reported GH-7258
np.array([[-0.5, 0., 0., 0.],
[0., -1., 0., 0.],
[1., 0., -0.5, 0.],
[0., 1., 0., -1.]]),
# Case 2 user reported GH-7258
np.array([[-3., 0., 1., 0.],
[-1., -1., -0., 1.],
[-3., -0., -0., 0.],
[-1., -0., 1., -1.]])
)
for A in cases:
x, y = matrix_balance(A)
x, (s, p) = matrix_balance(A, separate=1)
ip = np.empty_like(p)
ip[p] = np.arange(A.shape[0])
assert_allclose(y, np.diag(s)[ip, :])
assert_allclose(solve(y, A).dot(y), x)
| 37.647277 | 79 | 0.432115 |
b695ddf00dc1ee34342c4ca39e3e7bc7674b7434 | 15,731 | py | Python | latools/filtering/filt_obj.py | douglascoenen/latools | d7fa4cfc70d480a1f079d4d209b0c8f33ac78c6d | ["MIT"] | null | null | null | latools/filtering/filt_obj.py | douglascoenen/latools | d7fa4cfc70d480a1f079d4d209b0c8f33ac78c6d | ["MIT"] | null | null | null | latools/filtering/filt_obj.py | douglascoenen/latools | d7fa4cfc70d480a1f079d4d209b0c8f33ac78c6d | ["MIT"] | null | null | null |
"""
An object used for storing, manipulating and modifying data filters.
"""
import re
import numpy as np
from difflib import SequenceMatcher as seqm
from latools.helpers.helpers import bool_2_indices
class filt(object):
"""
Container for creating, storing and selecting data filters.
Parameters
----------
size : int
The length that the filters need to be (should be
the same as your data).
analytes : array_like
A list of the analytes measured in your data.
Attributes
----------
size : int
The length that the filters need to be (should be
the same as your data).
analytes : array_like
A list of the analytes measured in your data.
components : dict
A dict containing each individual filter that has been
created.
info : dict
A dict containing descriptive information about each
filter in `components`.
params : dict
A dict containing the parameters used to create
each filter, which can be passed directly to the
corresponding filter function to recreate the filter.
switches : dict
A dict of boolean switches specifying which filters
are active for each analyte.
keys : dict
A dict of logical strings specifying which filters are
applied to each analyte.
sequence : dict
A numbered dict specifying what order the filters were
applied in (for some filters, order matters).
n : int
The number of filters applied to the data.
"""
def __init__(self, size, analytes):
self.size = size
self.analytes = analytes
self.index = {}
self.sets = {}
self.maxset = -1
self.components = {}
self.info = {}
self.params = {}
self.keys = {}
self.n = 0
self.switches = {}
self.sequence = {}
for a in self.analytes:
self.switches[a] = {}
def __repr__(self):
apad = max([len(a) for a in self.analytes] + [7])
astr = '{:' + '{:.0f}'.format(apad) + 's}'
leftpad = max([len(s) for s
in self.components.keys()] + [11]) + 2
out = '{string:{number}s}'.format(string='n', number=3)
out += '{string:{number}s}'.format(string='Filter Name', number=leftpad)
for a in self.analytes:
out += astr.format(a)
out += '\n'
reg = re.compile('[0-9]+_(.*)')
for n, t in self.index.items():
out += '{string:{number}s}'.format(string=str(n), number=3)
tn = reg.match(t).groups()[0]
out += '{string:{number}s}'.format(string=str(tn), number=leftpad)
for a in self.analytes:
out += astr.format(str(self.switches[a][t]))
out += '\n'
return(out)
def add(self, name, filt, info='', params=(), setn=None):
"""
Add filter.
Parameters
----------
name : str
filter name
filt : array_like
boolean filter array
info : str
informative description of the filter
params : tuple
parameters used to make the filter
Returns
-------
None
"""
iname = '{:.0f}_'.format(self.n) + name
self.index[self.n] = iname
if setn is None:
setn = self.maxset + 1
self.maxset = setn
if setn not in self.sets.keys():
self.sets[setn] = [iname]
else:
self.sets[setn].append(iname)
# self.keys is not added to?
self.components[iname] = filt
self.info[iname] = info
self.params[iname] = params
for a in self.analytes:
self.switches[a][iname] = False
self.n += 1
return
def remove(self, name=None, setn=None):
"""
Remove filter.
Parameters
----------
name : str
name of the filter to remove
setn : int or True
int: number of set to remove
True: remove all filters in set that 'name' belongs to
Returns
-------
None
"""
if isinstance(name, int):
name = self.index[name]
if setn is not None:
name = self.sets[setn]
del self.sets[setn]
elif isinstance(name, (int, str)):
name = [name]
if setn is True:
for n in name:
for k, v in self.sets.items():
if n in v:
name.append([m for m in v if m != n])
for n in name:
for k, v in self.sets.items():
if n in v:
self.sets[k] = [m for m in v if n != m]
del self.components[n]
del self.info[n]
del self.params[n]
del self.keys[n]
for a in self.analytes:
del self.switches[a][n]
return
def clear(self):
"""
Clear all filters.
"""
self.components = {}
self.info = {}
self.params = {}
self.switches = {}
self.keys = {}
self.index = {}
self.sets = {}
self.maxset = -1
self.n = 0
for a in self.analytes:
self.switches[a] = {}
return
def clean(self):
"""
Remove unused filters.
"""
for f in sorted(self.components.keys()):
unused = not any(self.switches[a][f] for a in self.analytes)
if unused:
self.remove(f)
def on(self, analyte=None, filt=None):
"""
Turn on specified filter(s) for specified analyte(s).
Parameters
----------
analyte : optional, str or array_like
Name or list of names of analytes.
Defaults to all analytes.
filt : optional. int, str or array_like
Name/number or iterable names/numbers of filters.
Returns
-------
None
"""
if isinstance(analyte, str):
analyte = [analyte]
if isinstance(filt, (int, float)):
filt = [filt]
elif isinstance(filt, str):
filt = self.fuzzmatch(filt, multi=True)
if analyte is None:
analyte = self.analytes
if filt is None:
filt = list(self.index.values())
for a in analyte:
for f in filt:
if isinstance(f, (int, float)):
f = self.index[int(f)]
try:
self.switches[a][f] = True
except KeyError:
f = self.fuzzmatch(f, multi=False)
self.switches[a][f] = True
# for k in self.switches[a].keys():
# if f in k:
# self.switches[a][k] = True
return
def off(self, analyte=None, filt=None):
"""
Turn off specified filter(s) for specified analyte(s).
Parameters
----------
analyte : optional, str or array_like
Name or list of names of analytes.
Defaults to all analytes.
filt : optional. int, list of int or str
Number(s) or partial string that corresponds to filter name(s).
Returns
-------
None
"""
if isinstance(analyte, str):
analyte = [analyte]
if isinstance(filt, (int, float)):
filt = [filt]
elif isinstance(filt, str):
filt = self.fuzzmatch(filt, multi=True)
if analyte is None:
analyte = self.analytes
if filt is None:
filt = list(self.index.values())
for a in analyte:
for f in filt:
if isinstance(f, int):
f = self.index[f]
try:
self.switches[a][f] = False
except KeyError:
f = self.fuzzmatch(f, multi=False)
self.switches[a][f] = False
# for k in self.switches[a].keys():
# if f in k:
# self.switches[a][k] = False
return
def make(self, analyte):
"""
Make filter for specified analyte(s).
Filter specified in filt.switches.
Parameters
----------
analyte : str or array_like
Name or list of names of analytes.
Returns
-------
array_like
boolean filter
"""
if analyte is None:
analyte = self.analytes
elif isinstance(analyte, str):
analyte = [analyte]
out = []
for f in self.components.keys():
for a in analyte:
if self.switches[a][f]:
out.append(f)
key = ' & '.join(sorted(out))
for a in analyte:
self.keys[a] = key
return self.make_fromkey(key)
def fuzzmatch(self, fuzzkey, multi=False):
"""
Identify a filter by fuzzy string matching.
        Partial ('fuzzy') matching performed by `difflib.SequenceMatcher.ratio`
Parameters
----------
fuzzkey : str
A string that partially matches one filter name more than the others.
Returns
-------
The name of the most closely matched filter. : str
"""
keys, ratios = np.array([(f, seqm(None, fuzzkey, f).ratio()) for f in self.components.keys()]).T
mratio = max(ratios)
if multi:
return keys[ratios == mratio]
else:
if sum(ratios == mratio) == 1:
return keys[ratios == mratio][0]
else:
raise ValueError("\nThe filter key provided ('{:}') matches two or more filter names equally well:\n".format(fuzzkey) + ', '.join(keys[ratios == mratio]) + "\nPlease be more specific!")
def make_fromkey(self, key):
"""
Make filter from logical expression.
Takes a logical expression as an input, and returns a filter. Used for advanced
filtering, where combinations of nested and/or filters are desired. Filter names must
exactly match the names listed by print(filt).
Example: ``key = '(Filter_1 | Filter_2) & Filter_3'``
is equivalent to:
``(Filter_1 OR Filter_2) AND Filter_3``
statements in parentheses are evaluated first.
Parameters
----------
key : str
logical expression describing filter construction.
Returns
-------
array_like
boolean filter
"""
if key != '':
def make_runable(match):
return "self.components['" + self.fuzzmatch(match.group(0)) + "']"
            runable = re.sub(r'[^\(\)|& ]+', make_runable, key)
return eval(runable)
else:
return ~np.zeros(self.size, dtype=bool)
def make_keydict(self, analyte=None):
"""
Make logical expressions describing the filter(s) for specified analyte(s).
Parameters
----------
analyte : optional, str or array_like
Name or list of names of analytes.
Defaults to all analytes.
Returns
-------
dict
containing the logical filter expression for each analyte.
"""
if analyte is None:
analyte = self.analytes
elif isinstance(analyte, str):
analyte = [analyte]
out = {}
for a in analyte:
key = []
for f in self.components.keys():
if self.switches[a][f]:
key.append(f)
out[a] = ' & '.join(sorted(key))
self.keydict = out
return out
def grab_filt(self, filt, analyte=None):
"""
Flexible access to specific filter using any key format.
Parameters
----------
        filt : str, dict or bool
either logical filter expression, dict of expressions,
or a boolean
analyte : str
name of analyte the filter is for.
Returns
-------
array_like
boolean filter
"""
if isinstance(filt, str):
if filt in self.components:
if analyte is None:
return self.components[filt]
else:
if self.switches[analyte][filt]:
return self.components[filt]
else:
try:
ind = self.make_fromkey(filt)
except KeyError:
print(("\n\n***Filter key invalid. Please consult "
"manual and try again."))
elif isinstance(filt, dict):
try:
ind = self.make_fromkey(filt[analyte])
except ValueError:
print(("\n\n***Filter key invalid. Please consult manual "
"and try again.\nOR\nAnalyte missing from filter "
"key dict."))
elif filt:
ind = self.make(analyte)
else:
ind = ~np.zeros(self.size, dtype=bool)
return ind
def get_components(self, key, analyte=None):
"""
Extract filter components for specific analyte(s).
Parameters
----------
key : str
string present in one or more filter names.
e.g. 'Al27' will return all filters with
'Al27' in their names.
analyte : str
name of analyte the filter is for
Returns
-------
        dict
            containing the matching boolean filter components.
"""
out = {}
for k, v in self.components.items():
if key in k:
if analyte is None:
out[k] = v
elif self.switches[analyte][k]:
out[k] = v
return out
def get_info(self):
"""
Get info for all filters.
"""
out = ''
for k in sorted(self.components.keys()):
out += '{:s}: {:s}'.format(k, self.info[k]) + '\n'
return(out)
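# Minimal usage sketch (not part of the original latools source; the analyte
# names and threshold below are hypothetical, chosen only for illustration).
if __name__ == '__main__':
    demo_signal = np.random.rand(100)
    demo = filt(size=100, analytes=['Mg24', 'Al27'])
    demo.add('signal_thresh', demo_signal > 0.5,
             info='demo filter: signal above 0.5', params=(0.5,))
    demo.on(analyte='Al27', filt='signal_thresh')
    print(demo)  # table of filters vs. analytes
    mask = demo.grab_filt(True, 'Al27')  # combined boolean filter for Al27
    print('{:.0f} of {:.0f} points retained'.format(mask.sum(), mask.size))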
## TODO: [Low Priority] Re-write filt object to use pandas?
# class filt(object):
# def __init__(self, size, analytes):
# self.size = size
# self.analytes = analytes
# self.filter_table = pd.DataFrame(index=pd.MultiIndex(levels=[[], []], labels=[[], []], names=['N', 'desc']),
# columns=self.analytes)
# self.filters = Bunch()
# self.param = Bunch()
# self.info = Bunch()
# self.N = 0
# def __repr__(self):
# pass
# def add(self, name, filt, info='', params=()):
# self.filters[self.N] = filt
# self.param[self.N] = params
# self.info[self.N] = info
# self.filter_table.loc[(self.N, name), :] = False
# self.N += 1
# def remove(self):
# pass
# def clear(self):
# self.__init__(self.size, self.analytes)
# def clean(self):
# pass
# def on(self):
# pass
# def off(self):
# pass
# def make(self):
# pass
# def fuzzmatch(self):
# pass
# def make_fromkey(self):
# pass
# def make_keydict(self):
# pass
# def grab_filt(self):
# pass
# def get_components(self):
# pass
# def get_info(self):
# pass
| 28.395307 | 201 | 0.496536 |
68e0cf731c704bd346a53056787fac7107448516 | 7,306 | py | Python | emonitor/modules/maps/content_admin.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | ["BSD-3-Clause"] | 21 | 2015-03-04T11:36:47.000Z | 2021-04-20T07:51:53.000Z | emonitor/modules/maps/content_admin.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | ["BSD-3-Clause"] | 79 | 2015-01-04T21:35:49.000Z | 2020-03-05T07:22:10.000Z | emonitor/modules/maps/content_admin.py | Durburz/eMonitor | 56f3b1fe39b9da3a12b49bdd60d0cfca51c23351 | ["BSD-3-Clause"] | 27 | 2015-03-04T11:36:48.000Z | 2021-09-20T08:15:17.000Z |
import os
from flask import request, render_template, current_app
from emonitor.extensions import db, babel, signal
from emonitor.modules.maps.map import Map
from emonitor.modules.settings.settings import Settings
import map_utils
OBSERVERACTIVE = 1
def getAdminContent(self, **params):
"""
Deliver admin content of module maps
:param params: use given parameters of request
:return: rendered template as string
"""
module = request.view_args['module'].split('/')
if len(module) > 1:
if module[1] == 'position':
if request.method == 'POST':
                if request.form.get('action') == 'saveposition': # save default map position and home position
Settings.set('defaultLat', request.form.get('default_lat', ''))
Settings.set('defaultLng', request.form.get('default_lng', ''))
Settings.set('defaultZoom', request.form.get('default_zoom', ''))
Settings.set('homeLat', request.form.get('home_lat', ''))
Settings.set('homeLng', request.form.get('home_lng', ''))
db.session.commit()
params.update({'settings': Settings})
return render_template('admin.map.position.html', **params)
else:
if request.method == 'POST':
if request.form.get('action') == 'savemap': # save map
for m in Map.getMaps(): # set all other maps
m.default = not request.form.get('map_default')
if request.form.get('map_id') != 'None': # update
_map = Map.getMaps(request.form.get('map_id'))
_map.name = request.form.get('map_name')
_map.path = request.form.get('map_path')
_map.maptype = int(request.form.get('map_type'))
_map.tileserver = request.form.get('map_tileserver{}'.format(request.form.get('map_type')))
_map.default = request.form.get('map_default')
else: # add map
_map = Map(request.form.get('map_name'), request.form.get('map_path'), int(request.form.get('map_type')), request.form.get('map_tileserver'), int(request.form.get('map_default')))
db.session.add(_map)
db.session.commit()
elif request.form.get('action') == 'createmap': # add map
params.update({'map': Map('', '', 0, '', 0), 'tilebase': current_app.config.get('PATH_TILES'), 'settings': Settings})
return render_template('admin.map_actions.html', **params)
elif request.form.get('action').startswith('detailmap_'): # edit map
params.update({'map': Map.getMaps(request.form.get('action').split('_')[-1]), 'settings': Settings, 'tilebase': current_app.config.get('PATH_TILES'), 'tiles': '\', \''.join(Settings.getMapTiles(int(request.form.get('action').split('_')[-1])))})
return render_template('admin.map_actions.html', **params)
elif request.form.get('action').startswith('deletemap_'): # delete map
db.session.delete(Map.getMaps(int(request.form.get('action').split('_')[-1])))
db.session.commit()
elif request.form.get('action') == 'ordersetting': # change map order
maps = []
for _id in request.form.getlist('mapids'):
_map = Map.getMaps(int(_id))
maps.append(dict(name=_map.__dict__['name'], path=_map.__dict__['path'], maptype=_map.__dict__['maptype'], tileserver=_map.__dict__['tileserver'], default=_map.__dict__['default']))
Map.query.delete() # delete all maps
for _map in maps: # add maps in new order
db.session.add(Map(_map['name'], _map['path'], _map['maptype'], _map['tileserver'], _map['default']))
db.session.commit()
params.update({'maps': Map.getMaps()})
return render_template('admin.map.html', **params)
def getAdminData(self, **params):
"""
Deliver admin content of module maps (ajax)
:return: rendered template as string or json dict
"""
if request.args.get('action') == 'checkpath':
if os.path.exists(request.args.get('path')):
return '1'
return '0'
elif request.args.get('action') == 'loadmap': # load tiles
tile_path = request.args.get('path')
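        # calcSubItems maps a slippy-map tile (x, y) at one zoom level to its
        # four children at the next zoom level: (2x, 2y), (2x+1, 2y),
        # (2x, 2y+1) and (2x+1, 2y+1).  Starting from the zoom-12 tiles in the
        # request, the loop below expands the selection down to zoom 18 before
        # the tiles are downloaded.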
        def calcSubItems(pos):
return [(2 * int(pos[0]), 2 * int(pos[1])), (2 * int(pos[0]) + 1, 2 * int(pos[1])), (2 * int(pos[0]), 2 * int(pos[1]) + 1), (2 * int(pos[0]) + 1, 2 * int(pos[1]) + 1)]
#def getTile(zoom, pos):
# global tile_path
# response = urllib2.urlopen('http://a.tile.openstreetmap.org/%s/%s/%s.png' % (zoom, pos[0], pos[1]))
# #fout = open(tile_path + str(zoom)+'/'+str(pos[0])+'-'+str(pos[1])+'.png', 'wb')
# fout = open("%s%s/%s-%s.png" % (tile_path, zoom, pos[0], pos[1]), 'wb')
# fout.write(response.read())
# fout.close()
_items = {12: [], 13: [], 14: [], 15: [], 16: [], 17: [], 18: []}
for t in request.args.get('tiles').split("-"):
if t != '':
_items[12].append(t.split(","))
for zoom in range(13, 19):
for i in _items[zoom - 1]:
_items[zoom] += calcSubItems(i)
result = map_utils.loadTiles('%s%s' % (current_app.config.get('PATH_TILES'), request.args.get('path')), _items)
if result == 0:
return babel.gettext('settings.map.loadinginprogress') # loading still active
elif result == 1:
return babel.gettext('settings.map.loadingstarted') # loading started
return ""
elif request.args.get('action') == 'stoptileload': # stop tile download
for i in map_utils.LOADTILES:
signal.send('map', 'tiledownloaddone', tiles=i)
map_utils.LOADTILES = []
map_utils.CURRENTLOADING = []
return {}
elif request.args.get('action') == 'maptiles':
_map = Map.getMaps(id=request.args.get('id'))
if _map:
return Map.getMapBox(tilepath=current_app.config.get('PATH_TILES'), mappath=_map.path)
return Map.getMapBox()
elif request.args.get('action') == 'loadosmdata': # load all data from openstreetmap
from emonitor.extensions import scheduler
from emonitor.lib.osm.loaddata import parseOsmData
#import time, datetime
mapdata = Map.getMaps()[0].getMapBox(tilepath=current_app.config.get('PATH_TILES'))
lat = [mapdata['min_latdeg']]
while lat[-1] + .05 < mapdata['max_latdeg']:
lat.append(lat[-1] + .05)
lat.append(mapdata['max_latdeg'])
lng = [mapdata['min_lngdeg']]
while lng[-1] + .05 < mapdata['max_lngdeg']:
lng.append(lng[-1] + .05)
lng.append(mapdata['max_lngdeg'])
scheduler.add_job(parseOsmData, kwargs={'lat': lat, 'lng': lng, 'path': current_app.config.get('PATH_DATA')})
return {'job': 'started'}
elif request.args.get('action') == 'findcity': # search citystring and deliver position
return map_utils.loadPositionOfCity(request.args.get('cityname'))
return ""
| 45.949686 | 260 | 0.575691 |
6ea7c62555bb58fa5f6200f1b67aa352c1957260 | 24,467 | py | Python | Samples/sympl/python/etgen.py | TwoUnderscorez/dlr | 60dfacb9852ec022dd076c152e286b116553c905 | ["Apache-2.0"] | 307 | 2015-01-03T19:57:57.000Z | 2022-03-30T21:22:59.000Z | Src/Languages/sympl/python/etgen.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | ["MS-PL"] | 72 | 2015-09-28T16:23:24.000Z | 2022-03-14T00:47:04.000Z | Src/Languages/sympl/python/etgen.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | ["MS-PL"] | 85 | 2015-01-03T19:58:01.000Z | 2021-12-23T15:47:11.000Z |
import clr
if clr.use35:
clr.AddReference("Microsoft.Scripting.Core")
import Microsoft.Scripting.Ast as Exprs
else:
clr.AddReference("System.Core")
import System.Linq.Expressions as Exprs
from System.Collections.Generic import IEnumerable
from System.Dynamic import CallInfo
import System
import parser
import runtime
import lexer
### AnalyzeExpr performs semantic checking and name binding on the expression.
### It returns an Expression.
###
#def AnalyzeExpr (expr, scope):
# expr.Analyze(scope)
# dict[type(expr)](scope)
#
#def AnalyzeImportExpr (self, scope):
# pass
#parser.SymplImportExpr.Analyze = AnalyzeImportExpr
###
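### The live AnalyzeExpr below dispatches on the type of the parsed node (a
### hand-written visitor) and returns the corresponding DLR Expression tree.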
def AnalyzeExpr (expr, scope):
exprtype = type(expr)
debugprint("exprtype: ", exprtype)
if exprtype is parser.SymplImportExpr:
return AnalyzeImportExpr(expr, scope)
elif exprtype is parser.SymplFunCallExpr:
return AnalyzeFunCallExpr(expr, scope)
elif exprtype is parser.SymplDefunExpr:
return AnalyzeDefunExpr(expr, scope)
elif exprtype is parser.SymplLambdaExpr:
return AnalyzeLambdaExpr(expr, scope)
elif exprtype is parser.SymplIdExpr:
return AnalyzeIdExpr(expr, scope)
elif exprtype is parser.SymplQuoteExpr:
return AnalyzeQuoteExpr(expr, scope)
elif exprtype is parser.SymplLiteralExpr:
return Exprs.Expression.Constant(expr.Value)
elif exprtype is parser.SymplAssignExpr:
return AnalyzeAssignExpr(expr, scope)
elif exprtype is parser.SymplLetStarExpr:
return AnalyzeLetStarExpr(expr, scope)
elif exprtype is parser.SymplBlockExpr:
return AnalyzeBlockExpr(expr, scope)
elif exprtype is parser.SymplEqExpr:
return AnalyzeEqExpr(expr, scope)
elif exprtype is parser.SymplConsExpr:
return AnalyzeConsExpr(expr, scope)
elif exprtype is parser.SymplListCallExpr:
return AnalyzeListCallExpr(expr, scope)
elif exprtype is parser.SymplIfExpr:
return AnalyzeIfExpr(expr, scope)
elif exprtype is parser.SymplDottedExpr:
return AnalyzeDottedExpr(expr, scope)
elif exprtype is parser.SymplLoopExpr:
return AnalyzeLoopExpr(expr, scope)
elif exprtype is parser.SymplBreakExpr:
return AnalyzeBreakExpr(expr, scope)
elif exprtype is parser.SymplEltExpr:
return AnalyzeEltExpr(expr, scope)
elif exprtype is parser.SymplNewExpr:
return AnalyzeNewExpr(expr, scope)
elif exprtype is parser.SymplBinaryExpr:
return AnalyzeBinaryExpr(expr, scope)
elif exprtype is parser.SymplUnaryExpr:
return AnalyzeUnaryExpr(expr, scope)
else:
raise Exception("Internal: no expression to analyze -- " +
repr(expr))
### Returns a call to the import runtime helper function.
###
def AnalyzeImportExpr (expr, scope):
debugprint("analyze import ...")
if type(expr) is not parser.SymplImportExpr:
raise Exception("Internal: need import expr to analyze.")
if not scope.IsModule():
raise Exception("Import expression must be a top level expression.")
return runtime.MakeSymplImportCall(scope.RuntimeExpr, scope.ModuleExpr,
expr.NamespaceExpr, expr.MemberNames,
expr.Renames)
def AnalyzeDefunExpr (expr, scope):
debugprint("analyze defun ...", expr.Name.Name)
if type(expr) is not parser.SymplDefunExpr:
raise Exception("Internal: need defun to analyze.")
if not scope.IsModule():
raise Exception("Use Defmethod or Lambda when not defining " +
"top-level function.")
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetSetMemberBinder(expr.Name.Name),
object,
[scope.ModuleExpr,
AnalyzeLambdaDef(expr, scope, "defun " + expr.Name.Name)])
def AnalyzeLambdaExpr (expr, scope):
debugprint("analyze lambda ...")
if type(expr) is not parser.SymplLambdaExpr:
raise Exception("Internal: need lambda to analyze.")
return AnalyzeLambdaDef(expr, scope, "lambda")
def AnalyzeLambdaDef (expr, scope, description):
funscope = AnalysisScope(scope, description)
funscope.IsLambda = True # needed for return support.
paramsInOrder = []
for p in expr.Params:
var = Exprs.Expression.Parameter(object, p.Name)
paramsInOrder.append(var)
funscope.Names[p.Name.lower()] = var
## No need to add fun name to module scope since recursive call just looks
## up global name late bound. For lambdas, to get the effect of flet to
## support recursion, bind a variable to nil and then set it to a lambda.
## Then the lambda's body can refer to the let bound var in its def.
body = []
for e in expr.Body:
body.append(AnalyzeExpr(e, funscope))
return Exprs.Expression.Lambda(
Exprs.Expression.GetFuncType(
System.Array[System.Type](
[object] * (len(expr.Params) + 1))),
## Due to .NET 4.0 co/contra-variance, IPy's binding isn't picking
## the overload with just IEnumerable<Expr>, so pick it explicitly.
Exprs.Expression.Block.Overloads[IEnumerable[Exprs.Expression]](body),
paramsInOrder)
### Returns a dynamic InvokeMember or Invoke expression, depending on the
### Function expression.
###
def AnalyzeFunCallExpr (expr, scope):
debugprint("analyze function ...", expr.Function)
if type(expr) is not parser.SymplFunCallExpr:
raise Exception("Internal: need function call to analyze.")
if type(expr.Function) is parser.SymplDottedExpr:
if len(expr.Function.Exprs) > 1:
objExpr = AnalyzeDottedExpr(
parser.SymplDottedExpr(expr.Function.ObjectExpr,
expr.Function.Exprs[:-1]),
scope)
else:
objExpr = AnalyzeExpr(expr.Function.ObjectExpr, scope)
args = [AnalyzeExpr(a, scope) for a in expr.Arguments]
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetInvokeMemberBinder(
runtime.InvokeMemberBinderKey(
## Last must be ID.
expr.Function.Exprs[-1].IdToken.Name,
CallInfo(len(args)))),
object,
[objExpr] + args)
else:
fun = AnalyzeExpr(expr.Function, scope)
args = [AnalyzeExpr(a, scope) for a in expr.Arguments]
## Use DynExpr so that I don't always have to have a delegate to call,
## such as what happens with IPy interop.
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetInvokeBinder(CallInfo(len(args))),
object,
[fun] + args)
### Returns a chain of GetMember and InvokeMember dynamic expressions for
### the dotted expr.
###
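### For example (rough sketch only): a dotted reference such as a.b.c
### analyzes to nested dynamic GetMember operations, roughly
### GetMember("c", GetMember("b", <analyzed a>)), built left to right by
### the loop below.
###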
def AnalyzeDottedExpr (expr, scope):
debugprint("analyze dotted ...", expr.ObjectExpr)
if type(expr) is not parser.SymplDottedExpr:
raise Exception("Internal: need dotted expr to analyze.")
curExpr = AnalyzeExpr(expr.ObjectExpr, scope)
for e in expr.Exprs:
if type(e) is parser.SymplIdExpr:
tmp = Exprs.Expression.Dynamic(
scope.GetRuntime().GetGetMemberBinder(e.IdToken.Name),
object, [curExpr])
elif type(e) is parser.SymplFunCallExpr:
tmp = Exprs.Expression.Dynamic(
scope.GetRuntime().GetInvokeMemberBinder(
runtime.InvokeMemberBinderKey(
## Dotted exprs must be simple invoke members,
## a.b.(c ...), that is, function is identifier.
e.Function.IdToken.Name,
CallInfo(len(e.Arguments)))),
object,
[curExpr] + e.Arguments)
else:
raise Exception("Internal: dotted must be IDs or Funs.")
curExpr = tmp
return curExpr
### AnalyzeAssignExpr handles IDs, indexing, and member sets. IDs are either
### lexical or dynamic exprs on the module scope (ExpandoObject). Everything
### else is dynamic.
###
def AnalyzeAssignExpr (expr, scope):
debugprint("analyze expr ...", expr.Location)
loctype = type(expr.Location)
if loctype is parser.SymplIdExpr:
lhs = AnalyzeExpr(expr.Location, scope)
val = AnalyzeExpr(expr.Value, scope)
param = _findIdDef(expr.Location.IdToken.Name, scope)
if param is not None:
return Exprs.Expression.Assign(
lhs,
Exprs.Expression.Convert(val, param.Type))
else:
tmp = Exprs.Expression.Parameter(object, "assignTmpForRes")
return Exprs.Expression.Block([tmp], [
Exprs.Expression.Assign(
tmp,
Exprs.Expression.Convert(val, object)),
Exprs.Expression.Dynamic(
scope.GetRuntime().GetSetMemberBinder(
expr.Location.IdToken.Name),
object,
[scope.GetModuleExpr(), tmp]),
tmp])
elif loctype is parser.SymplEltExpr:
obj = AnalyzeExpr(expr.Location.ObjectExpr, scope)
args = [AnalyzeExpr(x, scope) for x in expr.Location.Indexes]
args.append(AnalyzeExpr(expr.Value, scope))
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetSetIndexBinder(
CallInfo(len(expr.Location.Indexes))),
object,
[obj] + args)
elif loctype is parser.SymplDottedExpr:
## For now, one dot only. Later, pick off the last dotted member
## access (like AnalyzeFunctionCall), and use a temp and block.
if len(expr.Location.Exprs) > 1:
raise Exception("Don't support assigning with more than simple " +
"dotted expression, o.foo.")
if not isinstance(expr.Location.Exprs[0], parser.SymplIdExpr):
raise Exception("Only support unindexed field or property when " +
"assigning dotted expression location.")
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetSetMemberBinder(
expr.Location.Exprs[0].IdToken.Name),
object,
[AnalyzeExpr(expr.Location.ObjectExpr, scope),
AnalyzeExpr(expr.Value, scope)])
### Return an Expression for referencing the ID. If we find the name in the
### scope chain, then we just return the stored ParamExpr. Otherwise, the
### reference is a dynamic member lookup on the root scope, a module object.
###
def AnalyzeIdExpr (expr, scope):
debugprint("analyze ID ...", expr.IdToken.Name)
if type(expr) is not parser.SymplIdExpr:
raise Exception("Internal: need ID Expr to analyze.")
if expr.IdToken.IsKeywordToken:
if expr.IdToken is parser.lexer.KeywordToken.Nil:
return Exprs.Expression.Constant(None, object)
elif expr.IdToken is parser.lexer.KeywordToken.True:
return Exprs.Expression.Constant(True)
elif expr.IdToken is parser.lexer.KeywordToken.False:
return Exprs.Expression.Constant(False)
else:
raise Exception("Internal: unrecognized keyword literal constant.")
else:
param = _findIdDef(expr.IdToken.Name, scope)
if param is not None:
return param
else:
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetGetMemberBinder(expr.IdToken.Name),
object,
scope.GetModuleExpr())
### _findIdDef returns the ParameterExpr for the name by searching the scopes,
### or it returns None.
###
def _findIdDef (name, scope):
curscope = scope
name = name.lower()
while curscope is not None and not curscope.IsModule():
if name in curscope.Names:
return curscope.Names[name]
else:
curscope = curscope.Parent
if curscope is None:
raise Exception("Got bad AnalysisScope chain with no module at end.")
return None
### AnalyzeLetStar returns a Block with vars, each initialized in the order
### they appear. Each var's init expr can refer to vars initialized before it.
### The Block's body is the Let*'s body.
###
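### For example (rough sketch of the tree produced below, assuming Sympl's
### let* surface syntax): the form (let* ((x 1) (y x)) y) becomes approximately
### Block([x, y], [x = Convert(1), y = Convert(x), y])
### where x and y are the ParameterExpressions created for the bindings.
###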
def AnalyzeLetStarExpr (expr, scope):
debugprint("analyze let* ...")
if type(expr) is not parser.SymplLetStarExpr:
raise Exception("Internal: need Let* Expr to analyze.")
letscope = AnalysisScope(scope, "let*")
## Analyze bindings.
inits = []
varsInOrder = []
for b in expr.Bindings:
## Need richer logic for mvbind
var = Exprs.Expression.Parameter(object, b[0].Name)
varsInOrder.append(var)
inits.append(Exprs.Expression.Assign(
var,
Exprs.Expression.Convert(AnalyzeExpr(b[1], letscope),
var.Type)))
## Add var to scope after analyzing init value so that init value
## references to the same ID do not bind to this uninitialized var.
letscope.Names[b[0].Name.lower()] = var
body = []
for e in expr.Body:
body.append(AnalyzeExpr(e, letscope))
## The order of vars passed to BlockExpr doesn't matter semantically, but may as well
## keep them in the order the programmer specified in case they look at the
## Expr Trees in the debugger or for meta-programming.
return Exprs.Expression.Block(object, varsInOrder, inits + body)
### AnalyzeBlockExpr returns a Block with the body exprs.
###
def AnalyzeBlockExpr (expr, scope):
debugprint("analyze block ...")
if type(expr) is not parser.SymplBlockExpr:
raise Exception("Internal: need Block Expr to analyze.")
body = []
for e in expr.Body:
body.append(AnalyzeExpr(e, scope))
## Due to .NET 4.0 co/contra-variance, IPy's binding isn't picking the overload
## with Type and IEnumerable<Expr>, so pick it explicitly.
return Exprs.Expression.Block.Overloads[
System.Type, IEnumerable[Exprs.Expression]](object, body)
### AnalyzeQuoteExpr converts a list, literal, or id expr to a runtime quoted
### literal and returns the Constant expression for it.
###
def AnalyzeQuoteExpr (expr, scope):
debugprint("analyze quote ...")
if type(expr) is not parser.SymplQuoteExpr:
raise Exception("Internal: need Quote Expr to analyze.")
return Exprs.Expression.Constant(
MakeQuoteConstant(expr.Expr, scope.GetRuntime()))
def MakeQuoteConstant (expr, symplRuntime):
if type(expr) is parser.SymplListExpr:
exprs = []
for e in expr.Elements:
exprs.append(MakeQuoteConstant(e, symplRuntime))
return runtime.Cons._List(*exprs)
elif isinstance(expr, lexer.IdOrKeywordToken):
return symplRuntime.MakeSymbol(expr.Name)
elif isinstance(expr, lexer.LiteralToken):
return expr.Value
else:
raise Exception("Internal: quoted list has -- " + repr(expr))
def AnalyzeEqExpr (expr, scope):
debugprint("analyze eq ...")
if type(expr) is not parser.SymplEqExpr:
raise Exception("Internal: need eq expr to analyze.")
return runtime.MakeSymplEqCall(AnalyzeExpr(expr.Left, scope),
AnalyzeExpr(expr.Right, scope))
def AnalyzeConsExpr (expr, scope):
debugprint("analyze cons ...")
if type(expr) is not parser.SymplConsExpr:
raise Exception("Internal: need cons expr to analyze.")
return runtime.MakeSymplConsCall(AnalyzeExpr(expr.Left, scope),
AnalyzeExpr(expr.Right, scope))
def AnalyzeListCallExpr (expr, scope):
debugprint("analyze List call ...")
if type(expr) is not parser.SymplListCallExpr:
raise Exception("Internal: need import expr to analyze.")
return runtime.MakeSymplListCall([AnalyzeExpr(x, scope)
for x in expr.Elements])
def AnalyzeIfExpr (expr, scope):
if type(expr) is not parser.SymplIfExpr:
raise Exception("Internal: need IF expr to analyze.")
if expr.Alternative is not None:
alt = AnalyzeExpr(expr.Alternative, scope)
else:
alt = Exprs.Expression.Constant(False)
return Exprs.Expression.Condition(
WrapBooleanTest(AnalyzeExpr(expr.Test, scope)),
Exprs.Expression.Convert(AnalyzeExpr(expr.Consequent, scope),
object),
Exprs.Expression.Convert(alt, object))
def WrapBooleanTest (expr):
tmp = Exprs.Expression.Parameter(object, "testtmp")
return Exprs.Expression.Block(
[tmp],
[Exprs.Expression.Assign(tmp, Exprs.Expression.Convert(expr, object)),
Exprs.Expression.Condition(
Exprs.Expression.TypeIs(tmp, bool),
Exprs.Expression.Convert(tmp, bool),
Exprs.Expression.NotEqual(tmp, Exprs.Expression.Constant(None)))])
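### Descriptive note on the test above: a CLR bool is used directly; any other
### value counts as true unless it is nil (None), so 0 and "" both test true.
###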
def AnalyzeLoopExpr (expr, scope):
debugprint("analyze loop ...")
if type(expr) is not parser.SymplLoopExpr:
raise Exception("Internal: need loop to analyze.")
loopscope = AnalysisScope(scope, "loop ")
loopscope.IsLoop = True # needed for break and continue
loopscope.LoopBreak = Exprs.Expression.Label(object, "loop break")
body = []
for e in expr.Body:
body.append(AnalyzeExpr(e, loopscope))
## Due to .NET 4.0 co/contra-variance, IPy's binding isn't picking the overload
## with Type and IEnumerable<Expr>, so pick it explicitly.
return Exprs.Expression.Loop(Exprs.Expression.Block.Overloads
[System.Type, IEnumerable[Exprs.Expression]]
(object, body),
loopscope.LoopBreak)
def AnalyzeBreakExpr (expr, scope):
debugprint("analyze break ..." + repr(expr.Value))
if type(expr) is not parser.SymplBreakExpr:
raise Exception("Internal: need break to analyze.")
loopscope = _findFirstLoop(scope)
if loopscope is None:
raise Exception("Call to Break not inside loop.")
if expr.Value is None:
value = Exprs.Expression.Constant(None, object)
else:
## Ok if value jumps to break label.
value = AnalyzeExpr(expr.Value, scope)
## Need final type=object arg because the Goto is in a value returning
## position, and the Break factory doesn't set the GotoExpr.Type property
## to the LoopBreak label target's type.
return Exprs.Expression.Break(loopscope.LoopBreak, value, object)
### _findFirstLoop returns the first loop AnalysisScope or None.
###
def _findFirstLoop (scope):
curscope = scope
while curscope is not None:
if curscope.IsLoop:
return curscope
else:
curscope = curscope.Parent
return None
def AnalyzeNewExpr (expr, scope):
debugprint("analyze new ...", expr.Typ)
if type(expr) is not parser.SymplNewExpr:
raise Exception("Internal: need New call to analyze.")
typ = AnalyzeExpr(expr.Typ, scope)
args = [AnalyzeExpr(a, scope) for a in expr.Arguments]
## Use DynExpr since we don't know type until runtime.
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetCreateInstanceBinder(CallInfo(len(args))),
object,
[typ] + args)
### AnalyzeEltExpr returns an Expression for accessing an element of an
### aggregate structure. This also works for .NET objs with indexer Item
### properties. We handle analyzing Elt for assignment in AnalyzeAssignExpr.
###
def AnalyzeEltExpr (expr, scope):
debugprint("analyze elt ...", expr.ObjectExpr)
if type(expr) is not parser.SymplEltExpr:
raise Exception("Internal: need Elt call to analyze.")
obj = AnalyzeExpr(expr.ObjectExpr, scope)
args = [AnalyzeExpr(a, scope) for a in expr.Indexes]
## Use DynExpr since we don't know obj until runtime.
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetGetIndexBinder(CallInfo(len(args))),
object,
[obj] + args)
def AnalyzeBinaryExpr (expr, scope):
if type(expr) is not parser.SymplBinaryExpr:
raise Exception("Internal: need binary op to analyze.")
if expr.Op == Exprs.ExpressionType.And:
## (and x y) is (if x y)
return AnalyzeIfExpr(parser.SymplIfExpr(expr.Left, expr.Right, None),
scope)
elif expr.Op == Exprs.ExpressionType.Or:
## (or x y) is (let ((tmpx x))
## (if tmpx tmpx (let ((tmp2 y)) (if tmp2 tmp2))))
##
## Build inner let for y first.
## Real impl needs to ensure unique ID in scope chain.
tmp2 = lexer.IdOrKeywordToken("__tmpOrLetVar2", False) #False = not kwd
tmpExpr2 = parser.SymplIdExpr(tmp2)
bindings2 = [(tmp2, expr.Right)]
ifExpr2 = parser.SymplIfExpr(tmpExpr2, tmpExpr2, None)
letExpr2 = parser.SymplLetStarExpr(bindings2, [ifExpr2])
## Build outer let for x.
tmp1 = lexer.IdOrKeywordToken("__tmpOrLetVar1", False) #False = not kwd
tmpExpr1 = parser.SymplIdExpr(tmp1)
bindings1 = [(tmp1, expr.Left)]
ifExpr1 = parser.SymplIfExpr(tmpExpr1, tmpExpr1, letExpr2)
return AnalyzeLetStarExpr(
parser.SymplLetStarExpr(bindings1, [ifExpr1]),
scope)
else:
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetBinaryOperationBinder(expr.Op),
object,
AnalyzeExpr(expr.Left, scope),
AnalyzeExpr(expr.Right, scope))
def AnalyzeUnaryExpr (expr, scope):
if type(expr) is not parser.SymplUnaryExpr:
raise Exception("Internal: need Unary op to analyze.")
if expr.Op == Exprs.ExpressionType.Not:
## Sympl has specific semantics for what is true vs. false and would
## use the OnesComplement node kind if Sympl had that.
return Exprs.Expression.Not(WrapBooleanTest(AnalyzeExpr(expr.Operand,
scope)))
else:
## Should never get here unless we add, say, unary minus.
return Exprs.Expression.Dynamic(
scope.GetRuntime().GetUnaryOperationBinder(expr.Op),
object,
AnalyzeExpr(expr.Operand, scope))
### AnalysisScope holds identifier information so that we can do name binding
### during analysis. It manages a map from names to ParameterExprs so ET
### definition locations and reference locations can alias the same variable.
###
### These chain from inner most BlockExprs, through LambdaExprs, to the root
### which models a file or top-level expression. The root has non-None
### ModuleExpr and RuntimeExpr, which are ParameterExprs.
###
class AnalysisScope (object):
def __init__ (self, parent, nam = "", runtime = None, runtimeParam = None,
moduleParam = None):
self.ModuleExpr = moduleParam
self.RuntimeExpr = runtimeParam
## Need runtime for interning Symbol constants at code gen time.
self.Runtime = runtime
self.Name = nam
self.Parent = parent
self.Names = {}
## Need IsLambda when we support return, to find the tightest enclosing fun.
self.IsLambda = False
self.IsLoop = False
self.LoopBreak = None
self.LoopContinue = None
def IsModule (self):
return self.ModuleExpr is not None
def GetModuleExpr (self):
curscope = self
while not curscope.IsModule():
curscope = curscope.Parent
return curscope.ModuleExpr
def GetRuntime (self):
curscope = self
while curscope.Runtime is None:
curscope = curscope.Parent
return curscope.Runtime
##################
### Dev-time Utils
##################
_debug = False
def debugprint (*stuff):
if _debug:
for x in stuff:
print x,
print
| 41.96741
| 85
| 0.629419
|
406e57fb64f0d30c5eb09008e3544fd072b4a1fc
| 2,950
|
py
|
Python
|
scripts/run_s2p.py
|
willyh101/analysis
|
64ac21ae58b56dfef2d240049777432a64ee041c
|
[
"MIT"
] | null | null | null |
scripts/run_s2p.py
|
willyh101/analysis
|
64ac21ae58b56dfef2d240049777432a64ee041c
|
[
"MIT"
] | null | null | null |
scripts/run_s2p.py
|
willyh101/analysis
|
64ac21ae58b56dfef2d240049777432a64ee041c
|
[
"MIT"
] | null | null | null |
from suite2p.run_s2p import run_s2p
import os
import shutil
import glob
from ScanImageTiffReader import ScanImageTiffReader
# data location and inputs
# animalid = 'w32_2'
# date = '20210420'
# expt_ids = ['1','2','3']
# result_base = 'E:/functional connectivity'
animalid = 'w42_2'
date = '20220302'
expt_ids = ['1', '2', '3', '4']
result_base = 'E:/functional connectivity'
# tiff_base = 'D:/Frankenrig/Experiments/'
tiff_base = 'F:/experiments'
diameter = 10
cleanup_fast_disk = True
default_ops = {
# general
'diameter': diameter,
'fast_disk': 'K:/tmp/s2p_python',
'do_bidiphase': True,
'save_mat': False,
'tau': 1.0,
# registration
'keep_movie_raw': True, # must be true for 2 step reg
'two_step_registration': True,
'nimg_init': 500, # subsampled frames for finding reference image
'batch_size': 500, #2000, # number of frames per batch, default=500
# non rigid registration settings
'nonrigid': False, # whether to use nonrigid registration
# cell extraction
'denoise': False,
'threshold_scaling': 0.8, # adjust the automatically determined threshold by this scalar multiplier, was 1. (WH) # 0.6 for low signal, default 5
'sparse_mode': False,
'max_iterations': 50, # usually stops at threshold scaling, default 20
'high_pass': 100, # running mean subtraction with window of size 'high_pass' (use low values for 1P), default 100
# custom settings
'remove_artifacts': (105,512-105)
}
def make_db(animalid, date, expt_ids, result_base, tiff_base):
save_path0 = os.path.join(result_base, animalid, date, '_'.join(expt_ids))
data_path = [os.path.join(tiff_base, animalid, date, lbl) for lbl in expt_ids]
with ScanImageTiffReader(glob.glob(data_path[0]+'/*.tif')[0]) as reader:
metadata = reader.metadata()
db = {
'save_path0': save_path0,
'data_path': data_path,
'nchannels': len(metadata.split('channelSave = [')[1].split(']')[0].split(';')),
'nplanes': len(metadata.split('hStackManager.zs = [')[1].split(']')[0].split(' ')),
# 'nplanes': 1,
'fs': float(metadata.split('scanVolumeRate = ')[1].split('\n')[0])
}
return db
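# The string parsing in make_db above assumes ScanImage-style tiff metadata.
# A rough sketch of the fields it expects (illustrative values only, not taken
# from a real acquisition):
#   "...channelSave = [1;2]..."           -> nchannels = 2
#   "...hStackManager.zs = [0 30 60]..."  -> nplanes = 3
#   "...scanVolumeRate = 9.5\n..."        -> fs = 9.5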
def process_data(ops, db):
fast_disk = ops['fast_disk'] + '/suite2p'
try:
shutil.rmtree(fast_disk)
print('fast disk contents deleted.')
except:
print('fast disk location empty.')
print('Starting suite2p...')
run_s2p(ops=ops,db=db)
if cleanup_fast_disk:
print('emptying contents of temp fast disk folder...', end=' ')
try:
shutil.rmtree(fast_disk)
print('done.')
except:
print('failed to clean up fast disk!')
print('suite2p finished.')
if __name__ == '__main__':
db = make_db(animalid, date, expt_ids, result_base, tiff_base)
process_data(ops=default_ops, db=db)
| 30.412371
| 148
| 0.63661
|
8cb0afa18c8bfc90dc657f15aad5a0050f5638eb
| 217
|
py
|
Python
|
ui.py
|
hmiyake/my-test-repo
|
42118133888b0a74c0da9e01211acb4023f713f9
|
[
"MIT"
] | null | null | null |
ui.py
|
hmiyake/my-test-repo
|
42118133888b0a74c0da9e01211acb4023f713f9
|
[
"MIT"
] | 2
|
2021-07-30T06:32:41.000Z
|
2021-08-01T18:54:45.000Z
|
ui.py
|
hmiyake/my-test-repo
|
42118133888b0a74c0da9e01211acb4023f713f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import calc
def main():
val1 = sys.argv[1]
val2 = sys.argv[2]
val1 = float(val1)
val2 = float(val2)
print(calc.calc_wari(val1, val2))
if __name__ == "__main__":
main()
| 12.055556
| 33
| 0.626728
|
f5d50e2f634c8a3a873f8a4cd041d67e07663617
| 5,805
|
py
|
Python
|
examples/raspberry-pi-garage-watch/main.py
|
carrasti/pi-garage-watch
|
878719e11278caf3a9529efa68fa35a134f4fd6d
|
[
"MIT"
] | null | null | null |
examples/raspberry-pi-garage-watch/main.py
|
carrasti/pi-garage-watch
|
878719e11278caf3a9529efa68fa35a134f4fd6d
|
[
"MIT"
] | 6
|
2020-12-29T13:59:28.000Z
|
2022-01-13T03:29:18.000Z
|
examples/raspberry-pi-garage-watch/main.py
|
carrasti/garage-watch
|
878719e11278caf3a9529efa68fa35a134f4fd6d
|
[
"MIT"
] | null | null | null |
"""
Example of how to integrate the garage_watch module with
a Raspberry Pi equipped with a Raspberry Pi camera, a door
sensor and a cancel button.
It will use the following specific libraries for the
Raspberry Pi
gpiozero
RPI.GPIO # backend for gpiozero
picamera
requests
twisted
scp (+ paramiko)
"""
import logging
import os
import argparse
from logging.handlers import TimedRotatingFileHandler
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from garage_watch_rpi.camera_controller import GarageCameraController
from garage_watch_rpi.sensor_control import SensorControl
from garage_watch_rpi.parking_controller_led import LEDParkingController
from garage_watch_rpi.clock_controller import ClockController
from garage_watch_rpi.mqtt_controller import MQTTService
# logger for the script
logger = logging.getLogger(__name__)
# environment variables with secrets
def main():
"""
The main execution function for this script
"""
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--snapshot-dir",
default='',
type=str,
help="the base directory for snapshots")
parser.add_argument(
"--video-dir",
type=str,
default='',
help="the base directory for storing video recordings")
parser.add_argument(
"--log-dir",
type=str,
default='console',
help="the base directory to store logs")
parser.add_argument(
"--upload-url",
type=str,
default='',
help="the url to send the file to")
parser.add_argument(
"--upload-auth-jwk-path",
type=str,
default='',
help="the path to serialized jwk for jwt authentication")
args = parser.parse_args()
logconfig_kwargs = {}
if args.log_dir != 'console':
# configure logging
# time-based rotating file handler: rolls over at midnight, keeps 14 backups
trfh = TimedRotatingFileHandler(
os.path.join(args.log_dir, 'garage-camera.log'),
when='midnight',
backupCount=14,
)
logconfig_kwargs['handlers'] = [trfh]
# set up logging to print to console
logging.basicConfig(
format='[%(levelname)s][%(asctime)s][%(name)s] %(message)s',
level=logging.INFO,
**logconfig_kwargs
)
# disable logging of transitions module
logging.getLogger('transitions').setLevel(logging.ERROR)
# limit logging of the twisted module to INFO
logging.getLogger('twisted').setLevel(logging.INFO)
mqtt_service = MQTTService(reactor)
mqtt_service.startService()
# report the door as closed by default once the MQTT connection is up
mqtt_service.whenConnected().addCallback(lambda *args: mqtt_service.report_door_closed())
PUSHBULLET_SECRET = os.environ.get('PUSHBULLET_SECRET', None)
if not PUSHBULLET_SECRET:
logger.warning(
"PUSHBULLET_SECRET environment variable not set. Messages will not be sent")
if not args.upload_url or not args.upload_auth_jwk_path:
logger.warning(
"Upload improperly configured. Snapshots will not be uploaded")
# create the camera controller instance
cam_control = GarageCameraController()
# configure output dirs
cam_control.snapshot_dir = args.snapshot_dir
cam_control.video_dir = args.video_dir
cam_control.upload_url = args.upload_url
cam_control.upload_auth_jwk_path = args.upload_auth_jwk_path
cam_control.pushbullet_secret = PUSHBULLET_SECRET
# configure periodically taking a picture
def periodic_take_picture():
picture_stream = cam_control.take_picture()
cam_control.upload_picture(picture_stream)
cam_control.save_picture(picture_stream)
def periodic_report_door_status():
# report status of door open based on door sensor
if not mqtt_service.connected or not sc:
return
if sc.is_door_open():
mqtt_service.report_door_open()
else:
mqtt_service.report_door_closed()
lc = LoopingCall(periodic_take_picture)
lc.start(60)
lc = LoopingCall(periodic_report_door_status)
lc.start(60)
# define the matrix for parking
parking_control = LEDParkingController(rotation=180, i2c_address=0x70)
def parking_control_status_changed(data, *args, **kwargs):
new_state, old_state = data[0]
ev = parking_control.events_dict.get(new_state)
if not ev:
return
# debug
# print(ev)
# send the event
fn = getattr(parking_control, ev)
fn()
def parking_control_update_callback(sc, *args, **kwargs):
# DEBUG:
# print(sc.parking_distance)
# parking_control.write_amounts(sc.parking_distance[0], sc.parking_distance[1])
pass
def door_open_handler(*args, **kwargs):
cam_control.door_open()
mqtt_service.report_door_open()
def door_close_handler(*args, **kwargs):
cam_control.door_closed()
mqtt_service.report_door_closed()
def override_button_handler(*args, **kwargs):
cam_control.cancel_requested()
sc = SensorControl(0x27)
sc.add_event_handler('parking_status_changed', parking_control_status_changed, sc=sc)
sc.add_parking_data_update_callback(parking_control_update_callback)
sc.add_event_handler('door_closed', door_close_handler)
sc.add_event_handler('door_open', door_open_handler)
sc.add_event_handler('override_button_pressed', override_button_handler)
# clock with PIR sensor on pin 22 and LED at I2C address 0x71
cc = ClockController(22, 0x71)
sc.start()
# and kick off the reactor
reactor.run()
if __name__ == "__main__":
main()
| 29.92268
| 93
| 0.682171
|
ade8bcbeb45d800bac0678f9d2f377a6b7cbc8b5
| 9,751
|
py
|
Python
|
test/integration/framework/nsh-lwm2m/lwm2m/tlv.py
|
dextero/Anjay
|
968bec079207315bba27bc59bd59f6d17a65d80d
|
[
"Apache-2.0"
] | null | null | null |
test/integration/framework/nsh-lwm2m/lwm2m/tlv.py
|
dextero/Anjay
|
968bec079207315bba27bc59bd59f6d17a65d80d
|
[
"Apache-2.0"
] | null | null | null |
test/integration/framework/nsh-lwm2m/lwm2m/tlv.py
|
dextero/Anjay
|
968bec079207315bba27bc59bd59f6d17a65d80d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2017 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import typing
def indent(text, indent=' '):
return indent + ('\n' + indent).join(text.split('\n'))
class TLVType:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, TLVType):
return self.value == other.value
else:
return self.value == other
def __str__(self):
matching = [name for name, value in TLVType.__dict__.items()
if isinstance(value, TLVType) and value.value == self.value]
assert len(matching) == 1
return matching[0]
TLVType.INSTANCE = TLVType(0)
TLVType.RESOURCE_INSTANCE = TLVType(1)
TLVType.MULTIPLE_RESOURCE = TLVType(2)
TLVType.RESOURCE = TLVType(3)
class TLVList(list):
def __str__(self):
"""
A list of TLVs used as a result of TLV.parse, with a custom __str__ function
for convenience.
"""
return 'TLV (%d elements):\n\n' % len(self) + indent('\n'.join(x.full_description() for x in self))
class TLV:
class BytesDispenser:
def __init__(self, data):
self.data = data
self.at = 0
def take(self, n):
if self.at + n > len(self.data):
raise IndexError('attempted to take %d bytes, but only %d available'
% (n, len(self.data) - self.at))
self.at += n
return self.data[self.at - n:self.at]
def bytes_remaining(self):
return len(self.data) - self.at
@staticmethod
def encode_int(data):
value = int(data)
for n, modifier in zip([8, 16, 32, 64], ['b', 'h', 'i', 'q']):
if -2 ** (n - 1) <= value < 2 ** (n - 1):
return struct.pack('>%s' % modifier, value)
raise NotImplementedError("integer out of supported range")
@staticmethod
def encode_double(data):
return struct.pack('>d', float(data))
@staticmethod
def encode_float(data):
return struct.pack('>f', float(data))
@staticmethod
def make_instance(instance_id: int,
content: typing.Iterable['TLV'] = None):
"""
Creates an Object Instance TLV.
instance_id -- ID of the Object Instance
content -- list of Resource TLVs contained in the Instance
"""
return TLV(TLVType.INSTANCE, instance_id, content or [])
@staticmethod
def _encode_resource_value(content: int or float or str or bytes):
if isinstance(content, int):
content = TLV.encode_int(content)
elif isinstance(content, float):
as_float = TLV.encode_float(content)
if struct.unpack('>f', as_float)[0] == content:
content = as_float # single precision is enough
else:
content = TLV.encode_double(content)
elif isinstance(content, str):
content = content.encode('ascii')
if not isinstance(content, bytes):
raise ValueError('Unsupported resource value type: ' + type(content).__name__)
return content
@staticmethod
def make_resource(resource_id: int,
content: int or float or str or bytes):
"""
Creates a Resource TLV.
resource_id -- ID of the Resource
content -- Resource content. If an integer is passed, its two's-complement
(U2) big-endian encoding is used. Strings are ASCII-encoded.
"""
return TLV(TLVType.RESOURCE, resource_id, TLV._encode_resource_value(content))
@staticmethod
def make_multires(resource_id, instances):
"""
Encodes Multiple Resource Instances and their values in TLV
resource_id -- ID of Resource to be encoded
instances -- list of tuples, each of the form (Resource Instance ID, Value)
"""
children = []
for riid, value in instances:
children.append(TLV(TLVType.RESOURCE_INSTANCE, int(riid),
TLV._encode_resource_value(value)))
return TLV(TLVType.MULTIPLE_RESOURCE, int(resource_id), children)
@staticmethod
def _parse_internal(data):
type_byte, = struct.unpack('!B', data.take(1))
tlv_type = TLVType((type_byte >> 6) & 0b11)
id_field_size = (type_byte >> 5) & 0b1
length_field_size = (type_byte >> 3) & 0b11
identifier_bits = b'\x00' + data.take(1 + id_field_size)
identifier, = struct.unpack('!H', identifier_bits[-2:])
if length_field_size == 0:
length = type_byte & 0b111
else:
length_bits = b'\x00' * 3 + data.take(length_field_size)
length, = struct.unpack('!I', length_bits[-4:])
if tlv_type == TLVType.RESOURCE:
return TLV(tlv_type, identifier, data.take(length))
elif tlv_type == TLVType.RESOURCE_INSTANCE:
return TLV(tlv_type, identifier, data.take(length))
elif tlv_type == TLVType.MULTIPLE_RESOURCE:
res_instances = []
data = TLV.BytesDispenser(data.take(length))
while data.bytes_remaining() > 0:
res_instances.append(TLV._parse_internal(data))
if not all(x.tlv_type == TLVType.RESOURCE_INSTANCE for x in res_instances):
raise ValueError('not all parsed objects are Resource Instances')
return TLV(tlv_type, identifier, res_instances)
elif tlv_type == TLVType.INSTANCE:
resources = []
data = TLV.BytesDispenser(data.take(length))
while data.bytes_remaining() > 0:
resources.append(TLV._parse_internal(data))
if data.bytes_remaining() > 0:
raise ValueError('stray bytes at end of data')
if not all(x.tlv_type in (TLVType.RESOURCE, TLVType.MULTIPLE_RESOURCE) for x in resources):
raise ValueError('not all parsed objects are Resources')
return TLV(tlv_type, identifier, resources)
@staticmethod
def parse(data) -> TLVList:
data = TLV.BytesDispenser(data)
result = TLVList()
while data.bytes_remaining() > 0:
result.append(TLV._parse_internal(data))
return result
def __init__(self, tlv_type, identifier, value):
self.tlv_type = tlv_type
self.identifier = identifier
self.value = value
def serialize(self):
if self.tlv_type in (TLVType.RESOURCE, TLVType.RESOURCE_INSTANCE):
data = self.value
else:
data = b''.join(x.serialize() for x in self.value)
type_field = (self.tlv_type.value << 6)
id_bytes = b''
if self.identifier < 2 ** 8:
id_bytes = struct.pack('!B', self.identifier)
else:
assert self.identifier < 2 ** 16
type_field |= 0b100000
id_bytes = struct.pack('!H', self.identifier)
len_bytes = b''
if len(data) < 8:
type_field |= len(data)
elif len(data) < 2 ** 8:
type_field |= 0b01000
len_bytes = struct.pack('!B', len(data))
elif len(data) < 2 ** 16:
type_field |= 0b10000
len_bytes = struct.pack('!H', len(data))
else:
assert len(data) < 2 ** 24
type_field |= 0b11000
len_bytes = struct.pack('!I', len(data))[1:]
return struct.pack('!B', type_field) + id_bytes + len_bytes + data
def _get_resource_value(self):
assert self.tlv_type in (TLVType.RESOURCE, TLVType.RESOURCE_INSTANCE)
value = str(self.value)
if len(self.value) <= 8:
value += ' (int: %d' % struct.unpack('>Q', (bytes(8) + self.value)[-8:])[0]
if len(self.value) == 4:
value += ', float: %f' % struct.unpack('>f', self.value)[0]
elif len(self.value) == 8:
value += ', double: %f' % struct.unpack('>d', self.value)[0]
value += ')'
return value
def __str__(self):
if self.tlv_type == TLVType.INSTANCE:
return 'instance %d (%d resources)' % (self.identifier, len(self.value))
elif self.tlv_type == TLVType.MULTIPLE_RESOURCE:
return 'multiple resource %d (%d instances)' % (self.identifier, len(self.value))
elif self.tlv_type == TLVType.RESOURCE_INSTANCE:
return 'resource instance %d = %s' % (self.identifier, self._get_resource_value())
elif self.tlv_type == TLVType.RESOURCE:
return 'resource %d = %s' % (self.identifier, self._get_resource_value())
def full_description(self):
if self.tlv_type == TLVType.INSTANCE:
return ('instance %d (%d resources)\n%s'
% (self.identifier, len(self.value), indent('\n'.join(x.full_description() for x in self.value))))
elif self.tlv_type == TLVType.MULTIPLE_RESOURCE:
return ('multiple resource %d (%d instances)\n%s'
% (self.identifier, len(self.value), indent('\n'.join(x.full_description() for x in self.value))))
else:
return str(self)
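# Minimal usage sketch of the API above (illustrative resource/instance IDs):
#
#     payload = TLV.make_instance(1, [TLV.make_resource(0, 'test')]).serialize()
#     parsed = TLV.parse(payload)   # -> TLVList with a single INSTANCE entry
#     print(parsed)                 # human-readable dump via TLVList.__str__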
| 35.849265
| 118
| 0.590196
|
535468bf4b59d82f72bd0d24ea4d6a2b37133add
| 1,949
|
py
|
Python
|
kde/unreleased/kaidan/kaidan.py
|
lnjX/craft-blueprints-kde
|
08dc4a6c5873c362504a9454ddbf8e965e26d3c2
|
[
"BSD-2-Clause"
] | null | null | null |
kde/unreleased/kaidan/kaidan.py
|
lnjX/craft-blueprints-kde
|
08dc4a6c5873c362504a9454ddbf8e965e26d3c2
|
[
"BSD-2-Clause"
] | null | null | null |
kde/unreleased/kaidan/kaidan.py
|
lnjX/craft-blueprints-kde
|
08dc4a6c5873c362504a9454ddbf8e965e26d3c2
|
[
"BSD-2-Clause"
] | null | null | null |
import info
class subinfo(info.infoclass):
def setTargets(self):
self.description = "A simple, user-friendly Jabber/XMPP client for every device!"
self.displayName = "Kaidan"
self.svnTargets['master'] = 'https://anongit.kde.org/kaidan.git'
for ver in ["0.4.1"]:
self.targets[ver] = "https://download.kde.org/stable/kaidan/{}/kaidan-{}.tar.xz".format(ver, ver)
self.archiveNames[ver] = "kaidan-v{}.tar.xz".format(ver)
self.targetInstSrc[ver] = "kaidan-{}".format(ver)
self.targetDigests['0.4.1'] = (
['a9660e2b9c9d9ac6802f7de9a8e1d29a6d552beffcafca27231682bf1038e03c'], CraftHash.HashAlgorithm.SHA256)
self.defaultTarget = '0.4.1'
def setDependencies(self):
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/qt5/qtdeclarative"] = None
self.runtimeDependencies["libs/qt5/qtmultimedia"] = None
self.runtimeDependencies["libs/qt5/qtsvg"] = None
self.runtimeDependencies["libs/zxing-cpp"] = None
self.runtimeDependencies["qt-libs/qxmpp"] = None
self.runtimeDependencies["kde/frameworks/tier1/kirigami"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
def createPackage(self):
self.defines["executable"] = r"bin\kaidan.exe"
# okular icons
#self.defines["icon"] = os.path.join(self.packageDir(), "okular.ico")
#self.defines["icon_png"] = os.path.join(self.packageDir(), ".assets", "150-apps-okular.png")
#self.defines["icon_png_44"] = os.path.join(self.packageDir(), ".assets", "44-apps-okular.png")
# this requires an 310x150 variant in addition!
#self.defines["icon_png_310x310"] = os.path.join(self.packageDir(), ".assets", "310-apps-okular.png")
return TypePackager.createPackage(self)
| 42.369565
| 113
| 0.661365
|
f7742dd224f99c8404ce1d3d3b5c26ffbce6dc8e
| 1,483
|
py
|
Python
|
azure/mgmt/network/v2017_06_01/models/express_route_circuit_sku.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/network/v2017_06_01/models/express_route_circuit_sku.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2017_06_01/models/express_route_circuit_sku.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
"""Contains SKU in an ExpressRouteCircuit.
:param name: The name of the SKU.
:type name: str
:param tier: The tier of the SKU. Possible values are 'Standard' and
'Premium'. Possible values include: 'Standard', 'Premium', 'Transport'
:type tier: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuTier
:param family: The family of the SKU. Possible values are: 'UnlimitedData'
and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData'
:type family: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitSkuFamily
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(self, name=None, tier=None, family=None):
self.name = name
self.tier = tier
self.family = family
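# Minimal usage sketch (illustrative values; check the Azure documentation for
# the SKU names available to your subscription):
#
#     sku = ExpressRouteCircuitSku(
#         name='Standard_MeteredData', tier='Standard', family='MeteredData')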
| 37.075
| 79
| 0.608901
|
20849854b1be38dd2d1bf72d7b64b3e6c70aa677
| 604
|
py
|
Python
|
Scripts/Mechatronic-II_Final-Year-Project/code/CutterToggle.py
|
WilliamMokoena/portfolio
|
8176d81e669279510af9fc7ff98aa769603cff60
|
[
"MIT"
] | null | null | null |
Scripts/Mechatronic-II_Final-Year-Project/code/CutterToggle.py
|
WilliamMokoena/portfolio
|
8176d81e669279510af9fc7ff98aa769603cff60
|
[
"MIT"
] | null | null | null |
Scripts/Mechatronic-II_Final-Year-Project/code/CutterToggle.py
|
WilliamMokoena/portfolio
|
8176d81e669279510af9fc7ff98aa769603cff60
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as gpio
class toggle:
def LaserCutterToggle(self, OnOff):
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(10, gpio.OUT)
if OnOff == 'ON':
gpio.output(10, gpio.HIGH)
if OnOff == 'OFF':
gpio.output(10, gpio.LOW)
def WaterJetCutterToggle(self, OnOff):
gpio.setmode(gpio.BOARD)
gpio.setwarnings(False)
gpio.setup(8, gpio.OUT)
if OnOff == 'ON':
gpio.output(8, gpio.HIGH)
if OnOff == 'OFF':
gpio.output(8, gpio.LOW)
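# Usage sketch, assuming the cutters are wired to BOARD pins 10 and 8 as above:
#
#     t = toggle()
#     t.LaserCutterToggle('ON')
#     t.WaterJetCutterToggle('OFF')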
| 24.16
| 43
| 0.529801
|
18db4c9e0ea9d32b88f6626e6691d752ebc4e836
| 3,530
|
py
|
Python
|
virtual/lib/python3.6/site-packages/dns/rdtypes/ANY/TSIG.py
|
amoskipz/pitch
|
477599a56958bc677e22764d7e0cc14d34510e8c
|
[
"Unlicense",
"MIT"
] | 2
|
2021-07-26T15:04:07.000Z
|
2021-07-26T17:23:08.000Z
|
virtual/lib/python3.6/site-packages/dns/rdtypes/ANY/TSIG.py
|
amoskipz/pitch
|
477599a56958bc677e22764d7e0cc14d34510e8c
|
[
"Unlicense",
"MIT"
] | 30
|
2020-07-31T05:23:33.000Z
|
2022-03-25T11:04:00.000Z
|
virtual/lib/python3.6/site-packages/dns/rdtypes/ANY/TSIG.py
|
amoskipz/pitch
|
477599a56958bc677e22764d7e0cc14d34510e8c
|
[
"Unlicense",
"MIT"
] | 2
|
2021-02-27T09:46:02.000Z
|
2021-08-06T03:12:20.000Z
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2001-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
class TSIG(dns.rdata.Rdata):
"""TSIG record"""
__slots__ = ['algorithm', 'time_signed', 'fudge', 'mac',
'original_id', 'error', 'other']
def __init__(self, rdclass, rdtype, algorithm, time_signed, fudge, mac,
original_id, error, other):
"""Initialize a TSIG rdata.
*rdclass*, an ``int`` is the rdataclass of the Rdata.
*rdtype*, an ``int`` is the rdatatype of the Rdata.
*algorithm*, a ``dns.name.Name``.
*time_signed*, an ``int``.
*fudge*, an ``int``.
*mac*, a ``bytes``
*original_id*, an ``int``
*error*, an ``int``
*other*, a ``bytes``
"""
super().__init__(rdclass, rdtype)
object.__setattr__(self, 'algorithm', algorithm)
object.__setattr__(self, 'time_signed', time_signed)
object.__setattr__(self, 'fudge', fudge)
object.__setattr__(self, 'mac', dns.rdata._constify(mac))
object.__setattr__(self, 'original_id', original_id)
object.__setattr__(self, 'error', error)
object.__setattr__(self, 'other', dns.rdata._constify(other))
def to_text(self, origin=None, relativize=True, **kw):
algorithm = self.algorithm.choose_relativity(origin, relativize)
return f"{algorithm} {self.fudge} {self.time_signed} " + \
f"{len(self.mac)} {dns.rdata._base64ify(self.mac, 0)} " + \
f"{self.original_id} {self.error} " + \
f"{len(self.other)} {dns.rdata._base64ify(self.other, 0)}"
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
self.algorithm.to_wire(file, None, origin, False)
file.write(struct.pack('!HIHH',
(self.time_signed >> 32) & 0xffff,
self.time_signed & 0xffffffff,
self.fudge,
len(self.mac)))
file.write(self.mac)
file.write(struct.pack('!HHH', self.original_id, self.error,
len(self.other)))
file.write(self.other)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
algorithm = parser.get_name(origin)
(time_hi, time_lo, fudge) = parser.get_struct('!HIH')
time_signed = (time_hi << 32) + time_lo
mac = parser.get_counted_bytes(2)
(original_id, error) = parser.get_struct('!HH')
other = parser.get_counted_bytes(2)
return cls(rdclass, rdtype, algorithm, time_signed, fudge, mac,
original_id, error, other)
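# Wire layout handled by _to_wire/from_wire_parser above (TSIG RDATA, RFC 2845):
# algorithm name, 48-bit time signed (packed as 16 + 32 bits), 16-bit fudge,
# 16-bit MAC length followed by the MAC, 16-bit original ID, 16-bit error,
# and a 16-bit other-data length followed by the other data.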
| 38.369565
| 77
| 0.624363
|
463214a58745bc450a7bac5069f1b8f33e04b2a7
| 7,939
|
py
|
Python
|
onmt/model_builder.py
|
yapdianang/fast_transformer
|
3c47e67dd98a3c5642304d5c7d8343618ada1f24
|
[
"MIT"
] | null | null | null |
onmt/model_builder.py
|
yapdianang/fast_transformer
|
3c47e67dd98a3c5642304d5c7d8343618ada1f24
|
[
"MIT"
] | null | null | null |
onmt/model_builder.py
|
yapdianang/fast_transformer
|
3c47e67dd98a3c5642304d5c7d8343618ada1f24
|
[
"MIT"
] | null | null | null |
"""
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.decoders import str2dec
from onmt.modules import Embeddings, CopyGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
def build_embeddings(opt, text_field, for_encoder=True):
"""
Args:
opt: the option in current environment.
text_field(TextMultiField): word and feats field.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
pad_indices = [f.vocab.stoi[f.pad_token] for _, f in text_field]
word_padding_idx, feat_pad_indices = pad_indices[0], pad_indices[1:]
num_embs = [len(f.vocab) for _, f in text_field]
num_word_embeddings, num_feat_embeddings = num_embs[0], num_embs[1:]
fix_word_vecs = opt.fix_word_vecs_enc if for_encoder \
else opt.fix_word_vecs_dec
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam",
fix_word_vecs=fix_word_vecs
)
return emb
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
enc_type = opt.encoder_type if opt.model_type == "text" else opt.model_type
return str2enc[enc_type].from_opt(opt, embeddings)
def build_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
dec_type = "ifrnn" if opt.decoder_type == "rnn" and opt.input_feed \
else opt.decoder_type
return str2dec[dec_type].from_opt(opt, embeddings)
def load_test_model(opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint['opt'])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
vocab = checkpoint['vocab']
if inputters.old_style_vocab(vocab):
fields = inputters.load_old_vocab(
vocab, opt.data_type, dynamic_dict=model_opt.copy_attn
)
else:
fields = vocab
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint,
opt.gpu)
if opt.fp32:
model.float()
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None, gpu_id=None):
"""Build a model from opts.
Args:
model_opt: the option loaded from checkpoint. It's important that
the opts have been updated and validated. See
:class:`onmt.utils.parse.ArgumentParser`.
fields (dict[str, torchtext.data.Field]):
`Field` objects for the model.
gpu (bool): whether to use gpu.
checkpoint: the model generated by the training phase, or a resumed snapshot
model from a stopped training.
gpu_id (int or NoneType): Which GPU to use.
Returns:
the NMTModel.
"""
# Build embeddings.
if model_opt.model_type == "text":
src_field = fields["src"]
src_emb = build_embeddings(model_opt, src_field)
else:
src_emb = None
# Build encoder.
encoder = build_encoder(model_opt, src_emb)
# Build decoder.
tgt_field = fields["tgt"]
tgt_emb = build_embeddings(model_opt, tgt_field, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert src_field.base_field.vocab == tgt_field.base_field.vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = build_decoder(model_opt, tgt_emb)
# Build NMTModel(= encoder + decoder).
if gpu and gpu_id is not None:
device = torch.device("cuda", gpu_id)
elif gpu and not gpu_id:
device = torch.device("cuda")
elif not gpu:
device = torch.device("cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size,
len(fields["tgt"].base_field.vocab)),
Cast(torch.float32),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
tgt_base_field = fields["tgt"].base_field
vocab_size = len(tgt_base_field.vocab)
pad_idx = tgt_base_field.vocab.stoi[tgt_base_field.pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx, model_opt.conv_first)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
# This preserves backward compatibility for models using a custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec)
model.generator = generator
model.to(device)
if model_opt.model_dtype == 'fp16':
model.half()
return model
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
logger.info(model)
return model
| 34.820175
| 100
| 0.655624
|
e19ab35cff7f08cbe5d6b8f207e910e8cc30189b
| 27
|
py
|
Python
|
test.py
|
catjacks38/FCC-GAN
|
b5e10e8249d5bba1aca75ab0081c6b54921c3ddc
|
[
"MIT"
] | 2
|
2021-10-31T20:54:27.000Z
|
2021-10-31T20:54:30.000Z
|
test.py
|
catjacks38/FCC-GAN
|
b5e10e8249d5bba1aca75ab0081c6b54921c3ddc
|
[
"MIT"
] | null | null | null |
test.py
|
catjacks38/FCC-GAN
|
b5e10e8249d5bba1aca75ab0081c6b54921c3ddc
|
[
"MIT"
] | null | null | null |
import model
model.test()
| 6.75
| 12
| 0.740741
|
c70724be2a7b5d97bb1957043848e0c86a2ec383
| 1,050
|
py
|
Python
|
example.py
|
developius/ControlRoom
|
004b7b62f7af4d8d82b44d18a21228cb3afdcecd
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
developius/ControlRoom
|
004b7b62f7af4d8d82b44d18a21228cb3afdcecd
|
[
"Apache-2.0"
] | null | null | null |
example.py
|
developius/ControlRoom
|
004b7b62f7af4d8d82b44d18a21228cb3afdcecd
|
[
"Apache-2.0"
] | null | null | null |
import mcpi.minecraft as minecraft
import mcpi.block as block
from controlroom import ControlRoom
import time
mc = minecraft.Minecraft.create()
cr = ControlRoom(mc)
# Hunger Games-like tree chase (try changing the threat to "lava" or "water")
#mc.postToChat("We're hunting one of you down...")
#duration = cr.chase(block.OBSIDIAN.id, block.GLOWING_OBSIDIAN.id, randomplayer=True, threat="trees")
#print("You lasted %i seconds!" % duration)
#mc.postToChat("You lasted %i seconds!" % duration)
# This is Queen Elsa running across water
#while True:
# pos = mc.player.getPos()
# b = mc.getBlock(pos.x, pos.y-1, pos.z)
# if (b == block.WATER.id or b == block.WATER_STATIONARY.id):
# mc.setBlocks(pos.x-1, pos.y-1, pos.z-1, pos.x+1, pos.y-1, pos.z+1, block.ICE.id)
# fly player around in a circle at circle origin (0,0), height 20 and radius 50
#circlePath = cr.circlePath(0, 20, 0, 50)
#mc.camera.setFollow(mc.getPlayerEntityIds()[0])
#while True:
# for coords in circlePath['coords']:
# mc.player.setPos(coords)
# time.sleep(0.025)
| 36.206897
| 101
| 0.707619
|
c4e8768f79f349645298dd8ccf551834b40a1cdf
| 394
|
py
|
Python
|
core/migrations/0009_player_password.py
|
Gabe-Corral/Clone-Labs
|
2de59b83899aa4f0de761207c62ad02fc9323611
|
[
"MIT"
] | null | null | null |
core/migrations/0009_player_password.py
|
Gabe-Corral/Clone-Labs
|
2de59b83899aa4f0de761207c62ad02fc9323611
|
[
"MIT"
] | null | null | null |
core/migrations/0009_player_password.py
|
Gabe-Corral/Clone-Labs
|
2de59b83899aa4f0de761207c62ad02fc9323611
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-06-02 22:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_remove_player_user'),
]
operations = [
migrations.AddField(
model_name='player',
name='password',
field=models.CharField(max_length=50, null=True),
),
]
| 20.736842
| 61
| 0.598985
|
cb8b4d29138bb84e4e81b77846fb79d46cf71ba0
| 11,407
|
py
|
Python
|
tests/handlers/test_typing.py
|
skalarproduktraum/synapse
|
c831748f4d243d74e9a3fd2042bc2b35cc30f961
|
[
"Apache-2.0"
] | 1
|
2019-06-22T04:17:50.000Z
|
2019-06-22T04:17:50.000Z
|
tests/handlers/test_typing.py
|
skalarproduktraum/synapse
|
c831748f4d243d74e9a3fd2042bc2b35cc30f961
|
[
"Apache-2.0"
] | null | null | null |
tests/handlers/test_typing.py
|
skalarproduktraum/synapse
|
c831748f4d243d74e9a3fd2042bc2b35cc30f961
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import ANY, Mock, call
from twisted.internet import defer
from synapse.api.errors import AuthError
from synapse.types import UserID
from tests import unittest
from tests.utils import register_federation_servlets
# Some local users to test with
U_APPLE = UserID.from_string("@apple:test")
U_BANANA = UserID.from_string("@banana:test")
# Remote user
U_ONION = UserID.from_string("@onion:farm")
# Test room id
ROOM_ID = "a-room"
def _expect_edu_transaction(edu_type, content, origin="test"):
return {
"origin": origin,
"origin_server_ts": 1000000,
"pdus": [],
"edus": [{"edu_type": edu_type, "content": content}],
}
def _make_edu_transaction_json(edu_type, content):
return json.dumps(_expect_edu_transaction(edu_type, content)).encode('utf8')
class TypingNotificationsTestCase(unittest.HomeserverTestCase):
servlets = [register_federation_servlets]
def make_homeserver(self, reactor, clock):
# we mock out the keyring so as to skip the authentication check on the
# federation API call.
mock_keyring = Mock(spec=["verify_json_for_server"])
mock_keyring.verify_json_for_server.return_value = defer.succeed(True)
# we mock out the federation client too
mock_federation_client = Mock(spec=["put_json"])
mock_federation_client.put_json.return_value = defer.succeed((200, "OK"))
hs = self.setup_test_homeserver(
datastore=(
Mock(
spec=[
# Bits that Federation needs
"prep_send_transaction",
"delivered_txn",
"get_received_txn_response",
"set_received_txn_response",
"get_destination_retry_timings",
"get_devices_by_remote",
# Bits that user_directory needs
"get_user_directory_stream_pos",
"get_current_state_deltas",
]
)
),
notifier=Mock(),
http_client=mock_federation_client,
keyring=mock_keyring,
)
return hs
def prepare(self, reactor, clock, hs):
# the tests assume that we are starting at unix time 1000
reactor.pump((1000,))
mock_notifier = hs.get_notifier()
self.on_new_event = mock_notifier.on_new_event
self.handler = hs.get_typing_handler()
self.event_source = hs.get_event_sources().sources["typing"]
self.datastore = hs.get_datastore()
retry_timings_res = {"destination": "", "retry_last_ts": 0, "retry_interval": 0}
self.datastore.get_destination_retry_timings.return_value = defer.succeed(
retry_timings_res
)
self.datastore.get_devices_by_remote.return_value = (0, [])
def get_received_txn_response(*args):
return defer.succeed(None)
self.datastore.get_received_txn_response = get_received_txn_response
self.room_members = []
def check_joined_room(room_id, user_id):
if user_id not in [u.to_string() for u in self.room_members]:
raise AuthError(401, "User is not in the room")
hs.get_auth().check_joined_room = check_joined_room
def get_joined_hosts_for_room(room_id):
return set(member.domain for member in self.room_members)
self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room
def get_current_users_in_room(room_id):
return set(str(u) for u in self.room_members)
hs.get_state_handler().get_current_users_in_room = get_current_users_in_room
self.datastore.get_user_directory_stream_pos.return_value = (
# we deliberately return a non-None stream pos to avoid doing an initial_spam
defer.succeed(1)
)
self.datastore.get_current_state_deltas.return_value = None
self.datastore.get_to_device_stream_token = lambda: 0
self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
def test_started_typing_local(self):
self.room_members = [U_APPLE, U_BANANA]
self.assertEquals(self.event_source.get_current_key(), 0)
self.successResultOf(
self.handler.started_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=20000
)
)
self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])])
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)
def test_started_typing_remote_send(self):
self.room_members = [U_APPLE, U_ONION]
self.successResultOf(
self.handler.started_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=20000
)
)
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
path="/_matrix/federation/v1/send/1000000",
data=_expect_edu_transaction(
"m.typing",
content={
"room_id": ROOM_ID,
"user_id": U_APPLE.to_string(),
"typing": True,
},
),
json_data_callback=ANY,
long_retries=True,
backoff_on_404=True,
try_trailing_slash_on_400=True,
)
def test_started_typing_remote_recv(self):
self.room_members = [U_APPLE, U_ONION]
self.assertEquals(self.event_source.get_current_key(), 0)
(request, channel) = self.make_request(
"PUT",
"/_matrix/federation/v1/send/1000000",
_make_edu_transaction_json(
"m.typing",
content={
"room_id": ROOM_ID,
"user_id": U_ONION.to_string(),
"typing": True,
},
),
federation_auth_origin=b'farm',
)
self.render(request)
self.assertEqual(channel.code, 200)
self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])])
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": ROOM_ID,
"content": {"user_ids": [U_ONION.to_string()]},
}
],
)
def test_stopped_typing(self):
self.room_members = [U_APPLE, U_BANANA, U_ONION]
# Gut-wrenching
from synapse.handlers.typing import RoomMember
member = RoomMember(ROOM_ID, U_APPLE.to_string())
self.handler._member_typing_until[member] = 1002000
self.handler._room_typing[ROOM_ID] = set([U_APPLE.to_string()])
self.assertEquals(self.event_source.get_current_key(), 0)
self.successResultOf(
self.handler.stopped_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID
)
)
self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])])
put_json = self.hs.get_http_client().put_json
put_json.assert_called_once_with(
"farm",
path="/_matrix/federation/v1/send/1000000",
data=_expect_edu_transaction(
"m.typing",
content={
"room_id": ROOM_ID,
"user_id": U_APPLE.to_string(),
"typing": False,
},
),
json_data_callback=ANY,
long_retries=True,
backoff_on_404=True,
try_trailing_slash_on_400=True,
)
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
self.assertEquals(
events[0],
[{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}],
)
def test_typing_timeout(self):
self.room_members = [U_APPLE, U_BANANA]
self.assertEquals(self.event_source.get_current_key(), 0)
self.successResultOf(
self.handler.started_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=10000
)
)
self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])])
self.on_new_event.reset_mock()
self.assertEquals(self.event_source.get_current_key(), 1)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)
self.reactor.pump([16])
self.on_new_event.assert_has_calls([call('typing_key', 2, rooms=[ROOM_ID])])
self.assertEquals(self.event_source.get_current_key(), 2)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=1)
self.assertEquals(
events[0],
[{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}],
)
# SYN-230 - see if we can still set after timeout
self.successResultOf(
self.handler.started_typing(
target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=10000
)
)
self.on_new_event.assert_has_calls([call('typing_key', 3, rooms=[ROOM_ID])])
self.on_new_event.reset_mock()
self.assertEquals(self.event_source.get_current_key(), 3)
events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0)
self.assertEquals(
events[0],
[
{
"type": "m.typing",
"room_id": ROOM_ID,
"content": {"user_ids": [U_APPLE.to_string()]},
}
],
)
| 33.748521
| 89
| 0.591304
|
423960473214712794bd471ec0192cd7d8ce82d7
| 3,637
|
py
|
Python
|
autobrat/annotator.py
|
autogoal/autobrat
|
421677c5db0874ec40080de38f802623a211ba96
|
[
"MIT"
] | 1
|
2022-02-10T12:02:17.000Z
|
2022-02-10T12:02:17.000Z
|
autobrat/annotator.py
|
autogoal/autobrat
|
421677c5db0874ec40080de38f802623a211ba96
|
[
"MIT"
] | null | null | null |
autobrat/annotator.py
|
autogoal/autobrat
|
421677c5db0874ec40080de38f802623a211ba96
|
[
"MIT"
] | 3
|
2021-03-03T19:06:41.000Z
|
2022-03-05T21:15:12.000Z
|
from collections import defaultdict
import collections
from os import sendfile
from typing import Dict, List, Tuple
from math import log2
import spacy
from scripts.utils import Collection, Sentence
from autogoal.ml import AutoML
from autogoal.kb import Sentence, Word, Postag
from autogoal.kb import List as ag_List
import typing as t
from .utils import load_training_entities
from functools import reduce
import logging
import json
logger = logging.getLogger('SentenceAnnotator')
class SentencesAnnotator(object):
nlp = spacy.load('es')
def __init__(self, models: t.List[AutoML], collection_base: Collection,
unique_classes: t.List[str]) -> None:
super().__init__()
self.models = models
self.collection_base = collection_base
self.unique_classes = unique_classes
def predict_prob(self, sentence: Sentence) -> Tuple[Sentence, float]:
text = sentence.text
clasifications = self.get_classifications(sentence.text)
pass
def predict(self, texts: t.List[str]) -> List[List[str]]:
parsed_sentences = [[w.text for w in self.nlp(text)] for text in texts]
ans = []
for sentence in parsed_sentences:
ans.append([])
for classifier in self.models:
prediction = classifier.predict([sentence])
ans[-1].append(prediction[0])
return ans
def get_classifications(self, text: str):
parsed_sentence = [w.text for w in self.nlp(text)]
print(parsed_sentence)
ans = []
for classifier in self.models:
prediction = classifier.predict([parsed_sentence])
ans.append(prediction[0])
return ans
def get_probs(self,
predictions: t.List[float]) -> t.List[Dict[str, float]]:
size = len(predictions[0])
ans = [defaultdict(lambda: 0) for i in range(size)]
for prediction in predictions:
for i, categorie in enumerate(prediction):
ans[i][categorie] += 1 / len(predictions)
return ans
def get_entropy(self, probs: t.List[Dict[str, float]]):
return sum(-1 * sum([word * log2(word) for word in words.values()])
for words in probs)
def get_entropy_bulk(self, sentences: t.List[str]) -> List[float]:
predictions = self.predict(sentences)
logger.info(json.dumps(predictions))
probs = [self.get_probs(p) for p in predictions]
logger.info(json.dumps(probs))
entropy = [self.get_entropy(p) for p in probs]
logger.info(json.dumps(entropy))
return entropy
@staticmethod
def generated_classifier_from_dataset(data: Collection,
number_of_models: int = 5):
models = []
lines, classes = load_training_entities(data)
unique_clases = reduce(lambda x, y: x | y, [set(c) for c in classes])
for _ in range(number_of_models):
classifier = AutoML(
input=ag_List(ag_List(Word())),
output=ag_List(ag_List(Postag())),
)
classifier.fit([[w.text for w in l] for l in lines], classes)
models.append(classifier)
return SentencesAnnotator(models=models,
collection_base=data,
unique_classes=unique_clases)
def fit(self, data: Collection):
lines, classes = load_training_entities(data)
lines = [[w.text for w in l] for l in lines]
for model in self.models:
model.fit(lines, classes)
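# Hedged usage sketch (editorial addition, not part of the original module): it
# shows how the ensemble annotator above could drive an active-learning loop.
# `train_collection` is a placeholder for an annotated Collection and the sample
# sentences are illustrative assumptions.
if __name__ == "__main__":
    train_collection: Collection = ...  # assumption: an annotated corpus loaded elsewhere
    annotator = SentencesAnnotator.generated_classifier_from_dataset(
        train_collection, number_of_models=5)
    # Higher vote entropy means more disagreement across the ensemble, i.e. the
    # sentence is a more informative candidate for manual annotation.
    entropies = annotator.get_entropy_bulk([
        "El paciente presenta fiebre alta.",
        "Se recomienda reposo absoluto.",
    ])
    most_uncertain = max(range(len(entropies)), key=lambda i: entropies[i])
    print(most_uncertain, entropies[most_uncertain])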
| 32.765766
| 79
| 0.617542
|
91bbbffc353bc10d5cc9358f5031d4802aa8aad2
| 2,408
|
py
|
Python
|
pyscf/nao/test/test_0004_log_mesh.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 3
|
2021-02-28T00:52:53.000Z
|
2021-03-01T06:23:33.000Z
|
pyscf/nao/test/test_0004_log_mesh.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/nao/test/test_0004_log_mesh.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest
from pyscf import gto
from pyscf.nao.log_mesh import log_mesh
mol = gto.M(
verbose = 1,
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = 'cc-pvdz',
)
class KnowValues(unittest.TestCase):
def test_log_mesh_gto(self):
""" Test construction of log mesh for GTOs"""
lm = log_mesh(gto=mol, rmin=1e-6)
self.assertEqual(lm.nr, 1024)
self.assertAlmostEqual(lm.rr[0], 1e-6)
self.assertAlmostEqual(lm.rr[-1], 11.494152344675497)
self.assertAlmostEqual(lm.pp[-1], 644.74911990708938)
self.assertAlmostEqual(lm.pp[0], 5.6093664027844639e-05)
def test_log_mesh_ion(self):
from pyscf.nao.m_siesta_ion_xml import siesta_ion_xml
import os
dname = os.path.dirname(os.path.abspath(__file__))
sp2ion = []
sp2ion.append(siesta_ion_xml(dname+'/O.ion.xml'))
sp2ion.append(siesta_ion_xml(dname+'/H.ion.xml'))
lm = log_mesh(sp2ion=sp2ion)
self.assertEqual(lm.nr, 1024)
self.assertAlmostEqual(lm.rr[0], 0.0050308261951499981)
self.assertAlmostEqual(lm.rr[-1], 11.105004591662)
self.assertAlmostEqual(lm.pp[-1], 63.271890905445957)
self.assertAlmostEqual(lm.pp[0], 0.028663642914905942)
def test_log_mesh(self):
""" Test construction of log mesh with predefined grids"""
from pyscf.nao.log_mesh import funct_log_mesh
rr,pp=funct_log_mesh(1024, 1e-3, 15.0)
lm = log_mesh(rr=rr,pp=pp)
self.assertEqual(lm.nr, 1024)
self.assertAlmostEqual(lm.rr[0], 1e-3)
self.assertAlmostEqual(lm.rr[-1], 15.0)
self.assertAlmostEqual(lm.pp[-1], 318.3098861837907)
self.assertAlmostEqual(lm.pp[0], 0.021220659078919384)
if __name__ == "__main__": unittest.main()
| 36.484848
| 74
| 0.702658
|
fe68d558db00c8a14fd269e4f1f38d36548f1195
| 1,978
|
py
|
Python
|
tests/callbacks/test_run_directory_uploader.py
|
stanford-crfm/composer
|
4996fbd818971afd6439961df58b531d9b47a37b
|
[
"Apache-2.0"
] | null | null | null |
tests/callbacks/test_run_directory_uploader.py
|
stanford-crfm/composer
|
4996fbd818971afd6439961df58b531d9b47a37b
|
[
"Apache-2.0"
] | null | null | null |
tests/callbacks/test_run_directory_uploader.py
|
stanford-crfm/composer
|
4996fbd818971afd6439961df58b531d9b47a37b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
import os
import pathlib
import pytest
from composer.callbacks import RunDirectoryUploaderHparams
from composer.core.event import Event
from composer.core.logging import Logger
from composer.core.state import State
from composer.utils import dist, run_directory
@pytest.mark.parametrize("use_procs", [False, True])
@pytest.mark.timeout(15)
def test_run_directory_uploader(tmpdir: pathlib.Path, use_procs: bool, dummy_state: State, dummy_logger: Logger,
monkeypatch: pytest.MonkeyPatch):
remote_dir = str(tmpdir / "run_directory_copy")
os.makedirs(remote_dir, exist_ok=True)
monkeypatch.setenv("RUN_DIRECTORY_UPLOADER_KEY", remote_dir) # for the local option, the key is the path
provider = "local"
container = "."
hparams = RunDirectoryUploaderHparams(
provider=provider,
upload_every_n_batches=1,
key_environ="RUN_DIRECTORY_UPLOADER_KEY",
container=container,
num_concurrent_uploads=1,
use_procs=use_procs,
)
uploader = hparams.initialize_object()
filename = "dummy_file"
local_file = os.path.join(run_directory.get_run_directory(), filename)
uploader.run_event(Event.INIT, dummy_state, dummy_logger)
with open(local_file, "w+") as f:
f.write("Hello, world!")
uploader.run_event(Event.BATCH_END, dummy_state, dummy_logger)
uploader.close()
uploader.post_close()
test_name = os.path.basename(tmpdir)
# verify upload uri is correct
upload_uri = uploader.get_uri_for_uploaded_file(local_file)
expected_upload_uri = f"{provider}://{container}/{uploader._object_name_prefix}{filename}"
assert upload_uri == expected_upload_uri
# now assert that we have a dummy file in the run directory copy folder
with open(os.path.join(remote_dir, test_name, f"rank_{dist.get_global_rank()}", "dummy_file"), "r") as f:
assert f.read() == "Hello, world!"
| 37.320755
| 112
| 0.72548
|
49dfcfed54a2f91d1081b4b0b96adf015a977b3c
| 3,033
|
py
|
Python
|
examples/example_classification_sound.py
|
fbussv/PyDSlog
|
7a2d1a0a615ac261eca6d9e9613e7edbe7869217
|
[
"MIT"
] | null | null | null |
examples/example_classification_sound.py
|
fbussv/PyDSlog
|
7a2d1a0a615ac261eca6d9e9613e7edbe7869217
|
[
"MIT"
] | null | null | null |
examples/example_classification_sound.py
|
fbussv/PyDSlog
|
7a2d1a0a615ac261eca6d9e9613e7edbe7869217
|
[
"MIT"
] | null | null | null |
import PyDSlog.classificator as classificator
import pandas as pd
import numpy as np
from scipy import signal
from scipy.fftpack import fft
################## STREAM DATA PARAMETERS #################
N = 2000
f_s = 1000
T = 1 / f_s
PREFIX = "1478194076527"
X_FILES = [PREFIX+"_x_AI1U_.csv"]
Y_FILE = PREFIX+"_y_.csv"
################## DELETE OFFSET #################
def delete_offset(sig):
    sig = signal.detrend(sig, type='constant')
return sig
################## FFT #################
def get_fft_values(y_values, T, N, f_s):
f_values = np.linspace(0.0, 1.0/(2.0*T), N//2)
fft_values_ = fft(y_values)
fft_values = 2.0/N * np.abs(fft_values_[0:N//2])
return f_values, fft_values
################## GENERATE FFTS #################
def do_ffts(signals):
s = []
for no in range(0, signals.shape[0]):
c = []
for co in range(0,signals.shape[1]):
sig = signals[no,co,:]
sig = delete_offset(sig)
freq_values, amp_values = get_fft_values(sig, T, N, f_s)
xy_values = np.vstack((freq_values, amp_values)).T
c.append(xy_values)
s.append(c)
return np.array(s)
def read_signals(name):
r = pd.read_csv(name, header=None, index_col=None)
return r
################## FILTER SIGNALS #################
def filter_signals(sig):
fc = 10
w = fc / (f_s / 2) # Normalize frequency
b, a = signal.butter(5, w, 'low')
s = []
for no in range(0, sig.shape[0]):
s1 = []
for co in range(0,sig.shape[1]):
s_ = sig[no,co,:]
s_ = signal.filtfilt(b, a, s_)
s1.append(s_)
s.append(s1)
return np.array(s)
signals = []
for file in X_FILES:
s = np.array(read_signals("../test/test/"+file))
signals.append(s)
signals = np.transpose(np.array(signals), (1, 0, 2))
labels = np.array(pd.read_csv("../test/test/"+Y_FILE, header=None, index_col=None))
labels = np.squeeze(labels)
signals_filtered = filter_signals(signals)
print("signals filtered shape: ", signals_filtered.shape)
signals_ffts = do_ffts(signals_filtered)
signals_ffts = signals_ffts[:,:,:,1]
print(signals_ffts.shape)
import matplotlib.pyplot as plt
plt.figure(1,figsize=(20,5))
plt.plot(signals_filtered[10,0,:])
plt.plot(signals_filtered[150,0,:])
plt.plot(signals_filtered[240,0,:])
plt.show()
################## TRAIN TEST SPLIT #################
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(signals_ffts, labels, test_size=0.4)
clf = classificator.SignalClassificator("medium_correlation")
clf.fit(x_train, y_train, verbose=True)
y_pred = clf.predict(x_test, 0, verbose=True)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
plt.figure(1,figsize=(20,5))
plt.plot(clf.master_dict["0"][0,:], color="yellow")
plt.plot(clf.master_dict["1"][0,:], color="green")
plt.plot(clf.master_dict["2"][0,:], color="red")
plt.show()
| 26.840708
| 88
| 0.612595
|
bab2f0328fab70389ad80e631d4cb5a31f61c73c
| 26,265
|
py
|
Python
|
openmdao/func_api.py
|
gdmcbain/OpenMDAO
|
ded968b82e05951979c14348b68c1c70b671055d
|
[
"Apache-2.0"
] | null | null | null |
openmdao/func_api.py
|
gdmcbain/OpenMDAO
|
ded968b82e05951979c14348b68c1c70b671055d
|
[
"Apache-2.0"
] | 1
|
2021-09-15T20:02:09.000Z
|
2021-09-15T20:02:09.000Z
|
openmdao/func_api.py
|
gdmcbain/OpenMDAO
|
ded968b82e05951979c14348b68c1c70b671055d
|
[
"Apache-2.0"
] | 3
|
2016-09-06T21:21:56.000Z
|
2017-05-31T15:01:11.000Z
|
"""
API to associate metadata with and retrieve metadata from function objects.
"""
from numbers import Number
import ast
import inspect
import textwrap
import warnings
import numpy as np
from contextlib import contextmanager
try:
import jax
import jax.numpy as jnp
except ImportError:
jax = None
_allowed_add_input_args = {
'val', 'shape', 'units', 'desc', 'tags', 'shape_by_conn', 'copy_shape', 'distributed',
'new_style_idx'
}
_allowed_add_output_args = {
    'val', 'shape', 'units', 'res_units', 'desc', 'lower', 'upper', 'ref', 'ref0', 'res_ref', 'tags',
'shape_by_conn', 'copy_shape', 'distributed'
}
_allowed_defaults_args = _allowed_add_input_args.union(_allowed_add_output_args)
_allowed_declare_options_args = {
'default', 'values', 'types', 'desc', 'upper', 'lower', 'check_valid', 'allow_none',
'recordable', 'deprecation'
}
_allowed_declare_partials_args = {
'of', 'wrt', 'dependent', 'rows', 'cols', 'val', 'method', 'step', 'form', 'step_calc',
'minimum_step'
}
_allowed_declare_coloring_args = {
'of', 'wrt', 'method', 'form', 'step', 'per_instance', 'num_full_jacs', 'tol', 'orders',
'perturb_size', 'min_improve_pct', 'show_summary', 'show_sparsity'
}
class OMWrappedFunc(object):
"""
Function wrapper that holds function metadata useful to OpenMDAO.
Parameters
----------
func : function
The function to be wrapped.
Attributes
----------
_f : function
The wrapped function.
_defaults : dict
Dict of default metadata values that could apply to any variable.
_inputs : dict
Dict of metadata dicts keyed to input name.
_outputs : dict
Dict of metadata dicts keyed to output name.
_declare_partials : list
List of keyword args, one entry for each call to declare_partials.
_declare_coloring : dict
Keyword args for call to declare_coloring.
_call_setup : bool
If True, call the setup functions for input and output metadata.
_use_jax : bool
If True, use jax to compute output shapes based on input shapes.
"""
def __init__(self, func):
"""
Initialize attributes.
"""
self._f = func
self._defaults = {'val': 1.0, 'shape': ()}
# populate _inputs dict with input names based on function signature so we can error
# check vs. inputs added via add_input
self._inputs = {n: {'val': None if p.default is inspect._empty else p.default}
for n, p in inspect.signature(func).parameters.items()}
self._outputs = {}
self._declare_partials = []
self._declare_coloring = None
self._call_setup = True
self._use_jax = False
def __call__(self, *args, **kwargs):
r"""
Call the wrapped function.
Parameters
----------
*args : list
Positional args.
**kwargs : dict
Keyword args.
Returns
-------
object
The return of the wrapped function.
"""
return self._f(*args, **kwargs)
def defaults(self, **kwargs):
r"""
Add metadata that may apply to any inputs or outputs of the wrapped function.
Any variable specific metadata will override metadata specified here.
Parameters
----------
**kwargs : dict
Metadata names and their values.
"""
_check_kwargs(kwargs, _allowed_defaults_args, 'defaults')
self._defaults.update(kwargs)
return self
def add_input(self, name, **kwargs):
r"""
Add metadata for an input of the wrapped function.
Parameters
----------
name : str
Name of the input variable.
**kwargs : dict
Keyword args to store.
"""
if name not in self._inputs:
raise NameError(f"In add_input, '{name}' is not an input to this function.")
meta = self._inputs[name]
for kw in kwargs:
if kw in meta and meta[kw] is not None:
raise RuntimeError(f"In add_input, metadata '{kw}' has already been added to "
f"function for input '{name}'.")
if meta.get('val') is not None and kwargs.get('val') is not None:
self._check_vals_equal(name, meta['val'], kwargs['val'])
_check_kwargs(kwargs, _allowed_add_input_args, 'add_input')
meta.update(kwargs)
return self
def add_inputs(self, **kwargs):
r"""
Add metadata for multiple inputs of the wrapped function.
Parameters
----------
**kwargs : dict
Keyword args to store. The value corresponding to each key is a dict containing the
metadata for the input name that matches that key.
"""
for name, meta in kwargs.items():
self.add_input(name, **meta)
return self
def add_output(self, name, **kwargs):
r"""
Add metadata for an output of the wrapped function.
Parameters
----------
name : str
Name of the output variable.
**kwargs : dict
Keyword args to store.
"""
if name in self._inputs:
raise RuntimeError(f"In add_output, '{name}' already registered as an input.")
if name in self._outputs:
raise RuntimeError(f"In add_output, '{name}' already registered as an output.")
_check_kwargs(kwargs, _allowed_add_output_args, 'add_output')
self._outputs[name] = kwargs
return self
def add_outputs(self, **kwargs):
r"""
Add metadata for multiple outputs of the wrapped function.
Parameters
----------
**kwargs : dict
Keyword args to store. The value corresponding to each key is a dict containing the
metadata for the output name that matches that key.
"""
for name, meta in kwargs.items():
self.add_output(name, **meta)
return self
def output_names(self, *names):
r"""
Set the names of a function's output variables.
Parameters
----------
*names : list of str
Names of outputs with order matching order of return values.
Returns
-------
function
A function wrapper that updates the function's metadata.
"""
kwargs = {n: {} for n in names}
return self.add_outputs(**kwargs)
def declare_option(self, name, **kwargs):
r"""
Collect name and keyword args to later declare an option on an OpenMDAO component.
Parameters
----------
name : str
Name of the option variable.
**kwargs : dict
Keyword args to store.
"""
_check_kwargs(kwargs, _allowed_declare_options_args, 'declare_option')
self._inputs[name].update(kwargs)
self._inputs[name]['is_option'] = True
return self
def declare_partials(self, **kwargs):
r"""
Collect args to be passed to declare_partials on an OpenMDAO component.
Parameters
----------
**kwargs : dict
Keyword args to store.
"""
_check_kwargs(kwargs, _allowed_declare_partials_args, 'declare_partials')
self._declare_partials.append(kwargs)
if 'method' in kwargs and kwargs['method'] == 'jax':
self._use_jax = True
return self
def declare_coloring(self, **kwargs):
r"""
Collect args to be passed to declare_coloring on an OpenMDAO component.
Parameters
----------
**kwargs : dict
Keyword args to store.
"""
if self._declare_coloring is None:
_check_kwargs(kwargs, _allowed_declare_coloring_args, 'declare_coloring')
self._declare_coloring = kwargs.copy()
return self
raise RuntimeError("declare_coloring has already been called.")
def get_input_meta(self):
"""
Get an iterator of (name, metdata_dict) for each input variable.
Returns
-------
iter of (str, dict)
Iterator of (name, metdata_dict) for each input variable.
"""
if self._call_setup:
self._setup()
return self._inputs.items()
def get_input_names(self):
"""
Get an iterator over input variable names.
Yields
------
str
Name of each input variable.
"""
for name, _ in self.get_input_meta():
yield name
def get_output_meta(self):
"""
Get an iterator of (name, metdata_dict) for each output variable.
Returns
-------
iter of (str, dict)
Iterator of (name, metdata_dict) for each output variable.
"""
if self._call_setup:
self._setup()
return self._outputs.items()
def get_output_names(self):
"""
Get an iterator over output variable names.
Yields
------
str
Name of each output variable.
"""
for name, _ in self.get_output_meta():
yield name
def get_declare_partials(self):
"""
Get an iterator of keyword args passed to each declare_partials call.
Returns
-------
iter of dict
Iterator of dicts containing the keyword args for each call.
"""
return self._declare_partials
def get_declare_coloring(self):
"""
Get keyword args passed to declare_coloring call.
Returns
-------
iter of dict
Iterator of dicts containing the keyword args for each call.
"""
return self._declare_coloring
def _check_vals_equal(self, name, val1, val2):
"""
Compare two values that could be a mix of ndarray and other types.
Parameters
----------
name : str
Name of the variable (for error reporting).
val1 : object
First value.
val2 : object
Second value.
"""
# == is more prone to raise exceptions when ndarrays are involved, so use !=
neq = val1 != val2
if (isinstance(neq, np.ndarray) and np.any(neq)) or neq:
raise RuntimeError(f"Conflicting metadata entries for '{name}'.")
def _resolve_default(self, key, meta):
"""
Update the value of the metadata corresponding to key based on self._defaults.
Parameters
----------
key : str
The metadata entry key.
meta : dict
The metadata dict to be updated.
"""
if (key not in meta or meta[key] is None) and key in self._defaults:
meta[key] = self._defaults[key]
def _setup(self):
"""
Set up input and output variable metadata dicts.
"""
self._call_setup = False
overrides = set(self._defaults)
self._setup_inputs(overrides)
self._setup_outputs(overrides)
def _setup_inputs(self, overrides):
"""
Set up the input variable metadata dicts.
Parameters
----------
overrides : set
Set of names of entries in self._defaults.
"""
ins = self._inputs
overrides = overrides - {'val', 'shape'}
# first, retrieve inputs from the function signature
for name in inspect.signature(self._f).parameters:
meta = ins[name]
if meta.get('is_option'):
continue
if 'val' in meta and meta['val'] is not None:
valshape = np.asarray(meta['val']).shape
else:
valshape = None
meta['val'] = self._defaults['val']
if meta.get('shape') is None:
if valshape is not None:
meta['shape'] = valshape
else:
meta['shape'] = self._defaults['shape']
meta['shape'] = _shape2tuple(meta['shape'])
if not valshape: # val is a scalar so reshape with the given meta['shape']
meta['val'] = np.ones(meta['shape']) * meta['val']
elif valshape != meta['shape']:
raise ValueError(f"Input '{name}' default value has shape "
f"{valshape}, but shape was specified as {meta['shape']}.")
for o in overrides:
self._resolve_default(o, meta)
def _setup_outputs(self, overrides):
"""
Set up the output variable metadata dicts.
Parameters
----------
overrides : set
Set of names of entries in self._defaults.
"""
outmeta = {}
# Parse the function code to possibly identify the names of the return values and
# input/output dependencies. Return names will be non-None only if they are a simple name,
# e.g., return a, b, c
outlist = []
try:
ret_info = get_function_deps(self._f)
except RuntimeError:
# this could happen if function is compiled or has multiple return lines
if not self._outputs:
raise RuntimeError(f"Couldn't determine function return names or "
"number of return values based on AST and no return value "
"metadata was supplied.")
warnings.warn("Couldn't determine function return names based on AST. Assuming number "
"of return values matches number of outputs defined in the metadata.")
outlist = list(self._outputs.items())
else:
for o, deps in ret_info:
outlist.append([o, {'deps': deps}])
notfound = []
for oname, ometa in self._outputs.items():
for n, meta in outlist:
if n == oname:
if meta is not ometa:
meta.update(ometa)
break
else: # didn't find oname
notfound.append(oname)
if notfound: # try to fill in the unnamed slots with user-supplied output data
inones = [i for i, (n, m) in enumerate(outlist) if n is None] # indices with no name
if len(notfound) != len(inones):
raise RuntimeError(f"There must be an unnamed return value for every unmatched "
f"output name {notfound} but only found {len(inones)}.")
# number of None return slots equals number of entries not found in outlist
for i_olist, name_notfound in zip(inones, notfound):
m = self._outputs[name_notfound]
_, ret_meta = outlist[i_olist]
ret_meta.update(m)
outlist[i_olist] = (name_notfound, ret_meta)
outs = {n: m for n, m in outlist}
if self._use_jax:
self._compute_out_shapes(self._inputs, outs)
for meta in outs.values():
for o in overrides:
self._resolve_default(o, meta)
if meta['shape'] is not None:
meta['shape'] = _shape2tuple(meta['shape'])
self._outputs = outs
def _compute_out_shapes(self, ins, outs):
"""
Compute the shapes of outputs based on those of the inputs.
Parameters
----------
ins : dict
Dict of input metadata containing input shapes.
outs : dict
Dict of output metadata that will be updated with shape information.
"""
need_shape = []
for name, ometa in outs.items():
try:
ometa['shape']
except KeyError:
need_shape.append(name)
args = []
for name, meta in ins.items():
if 'is_option' in meta and meta['is_option']:
if 'default' in meta:
val = meta['default']
elif 'values' in meta:
val = meta['values'][0]
else:
val = None
args.append(val)
continue
if meta['val'] is not None:
args.append(meta['val'])
else:
try:
shp = meta['shape']
except KeyError:
raise RuntimeError(f"Can't determine shape of input '{name}'.")
if jax is not None:
args.append(jax.ShapedArray(_shape2tuple(shp), dtype=np.float64))
# compute shapes as a check against annotated value (if any)
if jax is not None:
# must replace numpy with jax numpy when making jaxpr.
with jax_context(self._f.__globals__):
try:
v = jax.make_jaxpr(self._f)(*args)
except Exception as err:
if need_shape:
raise RuntimeError(f"Failed to determine the output shapes "
f"based on the input shapes. The error was: {err}. To "
"avoid this error, add return value annotations that "
"specify the shapes of the return values to the "
"function.")
warnings.warn("Failed to determine the output shapes based on the input "
"shapes in order to check the provided annotated values. The"
f" error was: {err}.")
else:
for val, name in zip(v.out_avals, outs):
oldshape = outs[name].get('shape')
if oldshape is not None and _shape2tuple(oldshape) != val.shape:
raise RuntimeError(f"Annotated shape for return value "
f"'{name}' of {oldshape} doesn't match computed "
f"shape of {val.shape}.")
outs[name]['shape'] = val.shape
need_shape = []
if need_shape: # output shapes weren't provided by user or by jax
shape = self._defaults['shape']
warnings.warn(f"Return values {need_shape} have unspecified shape so are assumed to "
f"have shape {shape}.")
for name in need_shape:
outs[name]['shape'] = shape
def wrap(func):
"""
Return a wrapped function object.
If arg is already a wrapped function object, return that.
Parameters
----------
func : function or OMwrappedFunc
A plain or already wrapped function object.
Returns
-------
OMwrappedFunc
The wrapped function object.
"""
if isinstance(func, OMWrappedFunc):
return func
return OMWrappedFunc(func)
def _get_kwargs(func, locals_dict, default=None):
"""
Convert a function's args to a kwargs dict containing entries that are not identically default.
Parameters
----------
func : function
The function whose args we want to convert to kwargs.
locals_dict : dict
The locals dict for the function.
default : object
Don't include arguments whose values are this object.
Returns
-------
dict
The non-default keyword args dict.
"""
return {n: locals_dict[n] for n in inspect.signature(func).parameters
if locals_dict[n] is not default}
def _check_kwargs(kwargs, allowed, fname):
"""
Check contents of kwargs for args that aren't allowed.
Parameters
----------
kwargs : dict
Original keyword args dict.
allowed : set
Set of allowed arg names.
fname : str
Function name (for error reporting).
"""
errs = [n for n in kwargs if n not in allowed]
if errs:
raise RuntimeError(f"In {fname}, metadata names {errs} are not allowed.")
def _shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
@contextmanager
def jax_context(globals):
"""
Create a context where np and numpy are replaced by their jax equivalents.
Parameters
----------
globals : dict
The globals dict to have its numpy/np attributes updated.
"""
savenp = savenumpy = None
if 'np' in globals and globals['np'] is np:
savenp = globals['np']
globals['np'] = jnp
if 'numpy' in globals:
savenumpy = globals['numpy']
globals['numpy'] = jnp
try:
yield
finally:
if savenp is not None:
globals['np'] = savenp
if savenumpy is not None:
globals['numpy'] = savenumpy
def _get_long_name(node):
"""
Return a name (possibly dotted) corresponding to the give node or None.
If the node is a Name node or an Attribute node that is composed only of other Attribute or
Name nodes, then return the full dotted name for this node. Otherwise, i.e., if this node
contains other expressions.
Parameters
----------
node : ASTnode
A node of an abstract syntax tree.
Returns
-------
str or None
Name corresponding to the given node.
"""
if isinstance(node, ast.Name):
return node.id
elif not isinstance(node, ast.Attribute):
return None
val = node.value
parts = [node.attr]
while True:
if isinstance(val, ast.Attribute):
parts.append(val.attr)
val = val.value
elif isinstance(val, ast.Name):
parts.append(val.id)
break
else: # it's more than just a simple dotted name
return None
return '.'.join(parts[::-1])
class _FuncDepCollector(ast.NodeVisitor):
"""
An ast.NodeVisitor that records dependencies between inputs and outputs.
Each instance of this is single-use. If needed multiple times create a new instance
each time. It also assumes that the AST to be visited contains only a single function
definition.
Attributes
----------
_ret_info : list
List containing name (or None) for each function return value.
"""
def __init__(self, func):
super().__init__()
self._attrs = None
self._deps = {}
self._ret_info = []
self.visit(ast.parse(textwrap.dedent(inspect.getsource(func)), mode='exec'))
def _do_assign(self, targets, rhs):
lhs_attrs = []
for t in targets:
lhs_attrs.append(_get_long_name(t))
self._attrs = set()
self.visit(rhs)
for a in lhs_attrs:
if a not in self._deps:
self._deps[a] = set()
self._deps[a].update(self._attrs)
self._attrs = None
def visit_Attribute(self, node):
if self._attrs is not None:
self._attrs.add(_get_long_name(node))
def visit_Name(self, node):
if self._attrs is not None:
self._attrs.add(node.id)
def visit_Assign(self, node):
self._do_assign(node.targets, node.value)
def visit_AugAssign(self, node):
self._do_assign((node.target,), node.value)
def visit_AnnAssign(self, node):
if node.value is not None:
self._do_assign((node.target,), node.value)
def visit_Call(self, node): # (func, args, keywords, starargs, kwargs)
for arg in node.args:
self.visit(arg)
for kw in node.keywords:
self.visit(kw.value)
def _get_return_attrs(self, node):
self._attrs = set()
self.visit(node)
# also include a boolean indicating if the return expr is a simple name
self._ret_info.append((tuple(self._attrs), isinstance(node, ast.Name)))
self._attrs = None
def visit_Return(self, node):
"""
Visit a Return node.
Parameters
----------
node : ASTnode
The return node being visited.
"""
if self._ret_info:
raise RuntimeError("_FuncDepCollector does not support multiple returns in a "
"single function. Either the given function contains multiple "
"returns or this _FuncDepCollector instance has been used "
"more than once, which is unsupported.")
if isinstance(node.value, ast.Tuple):
for n in node.value.elts:
self._get_return_attrs(n)
else:
self._get_return_attrs(node.value)
def get_function_deps(func):
"""
Return dependency between return value(s) and inputs.
Parameters
----------
func : function
The function used to compute input/output dependencies.
Returns
-------
list
List of the form (name or None, dependency_set) containing one entry for each return
value. 'name' will be the name of the return value if it has a simple name, otherwise
None.
"""
input_names = set(inspect.signature(func).parameters)
funcdeps = _FuncDepCollector(func)
deps = funcdeps._deps
retdeps = []
for names, _ in funcdeps._ret_info:
depset = set()
for n in names:
stack = [n]
seen = set()
while stack:
v = stack.pop()
seen.add(v)
if v in input_names:
depset.add(v)
elif v in deps:
stack.extend([d for d in deps[v] if d not in seen])
retdeps.append(depset)
return [(n[0] if simple and n[0] not in input_names else None, d)
for ((n, simple), d) in zip(funcdeps._ret_info, retdeps)]
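# Hedged usage sketch (editorial addition, not part of the original module): it
# shows how the `wrap` API above attaches metadata to a plain function. The
# function `rect_area` and every metadata value here are illustrative assumptions.
if __name__ == "__main__":
    def rect_area(length, width):
        area = length * width
        return area

    f = (wrap(rect_area)
         .defaults(units='m')                        # fallback units for any variable
         .add_input('length', val=np.ones(4))
         .add_input('width', val=2.0 * np.ones(4))
         .add_output('area', shape=(4,), units='m**2'))

    # get_output_meta() triggers _setup(), which resolves shapes/defaults and
    # infers from the AST that 'area' depends on both inputs.
    for name, meta in f.get_output_meta():
        print(name, meta['shape'], meta['units'], meta['deps'])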
| 31.759371
| 100
| 0.55968
|
cb111b99d1a9147cafa9998d1004bab2eeb96d82
| 1,022
|
py
|
Python
|
configs/config4.py
|
ebrunet28/MultiDecoder-DPRNN
|
36fd6c35e730379e4f676a25eac451409a01f068
|
[
"MIT"
] | 8
|
2021-01-22T07:33:55.000Z
|
2022-01-14T03:11:55.000Z
|
configs/config4.py
|
ebrunet28/MultiDecoder-DPRNN
|
36fd6c35e730379e4f676a25eac451409a01f068
|
[
"MIT"
] | null | null | null |
configs/config4.py
|
ebrunet28/MultiDecoder-DPRNN
|
36fd6c35e730379e4f676a25eac451409a01f068
|
[
"MIT"
] | 4
|
2021-02-28T13:30:21.000Z
|
2022-03-28T12:47:08.000Z
|
import time
import sys
import os
config = os.path.basename(__file__).split('.')[0]
root = "/ws/ifp-10_3/hasegawa/junzhez2/Baseline_Model"
lamb = 0.5
maxlen = 4
minlen = 2
kernel_size = 8
enc = 256
bottleneck = 64
hidden = 128
num_layers = 6
K = 125
num_spks = 5
epochs = 128
half_lr = True # feature not enabled
early_stop = True
max_norm = 5
shuffle = False
batch_size = 3
norm = 'ln'
rnn_type = 'LSTM'
dropout = 0.0
lr = 5e-4
lr_override = False
momentum = 0.0
l2 = 0.0 # weight decay
save_folder = os.path.join(root, 'models')
checkpoint = 1
continue_from = os.path.join(save_folder, "config4.pth") # if not exist, randomly initialize
model_path = config + "_best.pth" # best model save path
print_freq = 10
comment = ' weighted chunk resampling'
log_dir = os.path.join(root, 'runs', time.strftime("%Y%m%d-%H%M%S") + config + comment)
use_onoff = True # hungarian model if on, DPRNN if off. useless if multidecoder=True
multiloss = True
mul = True
cat = True
decay_period = 1
multidecoder = True
device_ids = [0, 1, 2]
| 24.333333
| 92
| 0.715264
|
76a7193b80321206f370bced1ad192bc12f327df
| 5,674
|
py
|
Python
|
src/ai-did-you-mean-this/azext_ai_did_you_mean_this/tests/latest/data/scenarios.py
|
SDKAutoUP/azure-cli-extensions
|
5991b1656318130b78ea83a2aa87386fbe0b60eb
|
[
"MIT"
] | null | null | null |
src/ai-did-you-mean-this/azext_ai_did_you_mean_this/tests/latest/data/scenarios.py
|
SDKAutoUP/azure-cli-extensions
|
5991b1656318130b78ea83a2aa87386fbe0b60eb
|
[
"MIT"
] | null | null | null |
src/ai-did-you-mean-this/azext_ai_did_you_mean_this/tests/latest/data/scenarios.py
|
SDKAutoUP/azure-cli-extensions
|
5991b1656318130b78ea83a2aa87386fbe0b60eb
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import List
from azext_ai_did_you_mean_this._cli_command import CliCommand
from azext_ai_did_you_mean_this._suggestion import Suggestion
from azext_ai_did_you_mean_this.tests.latest.data._command_parameter_normalization_scenario import \
CommandParameterNormalizationScenario
from azext_ai_did_you_mean_this.tests.latest.data._command_normalization_scenario import \
CommandNormalizationScenario
from azext_ai_did_you_mean_this.tests.latest.data._scenario import Scenario
from azext_ai_did_you_mean_this.tests.latest.data.user_fault_type import \
UserFaultType
TEST_SCENARIOS: List[Scenario] = [
Scenario(
cli_command=CliCommand('account'),
expected_user_fault_type=UserFaultType.MISSING_REQUIRED_SUBCOMMAND,
suggestions=[
Suggestion('account list'),
Suggestion('account show'),
Suggestion('account set', '--subscription', 'Subscription')
]
),
Scenario(
cli_command=CliCommand('account get-access-token', ['--test', '--debug'], 'a'),
expected_user_fault_type=UserFaultType.UNRECOGNIZED_ARGUMENTS
),
Scenario(
cli_command=CliCommand('ai-did-you-mean-this ve'),
expected_user_fault_type=UserFaultType.NOT_IN_A_COMMAND_GROUP,
extension='ai-did-you-mean-this'
),
Scenario(
cli_command=CliCommand('ai-did-you-mean-this version', '--name', '"Christopher"'),
expected_user_fault_type=UserFaultType.UNRECOGNIZED_ARGUMENTS,
extension='ai-did-you-mean-this'
),
Scenario(
cli_command=CliCommand('boi'),
expected_user_fault_type=UserFaultType.NOT_IN_A_COMMAND_GROUP
),
Scenario(
cli_command=CliCommand('extension'),
expected_user_fault_type=UserFaultType.MISSING_REQUIRED_SUBCOMMAND,
suggestions=[
Suggestion('extension list')
]
),
Scenario(
cli_command=CliCommand('vm', '--debug'),
expected_user_fault_type=UserFaultType.MISSING_REQUIRED_SUBCOMMAND
),
Scenario(
cli_command=CliCommand('vm list', '--query', '".id"'),
expected_user_fault_type=UserFaultType.INVALID_JMESPATH_QUERY,
suggestions=[
Suggestion('vm list', ['--output', '--query'], ['json', '"[].id"'])
]
),
Scenario(
cli_command=CliCommand('vm show', ['--name', '--ids'], '"BigJay"'),
expected_user_fault_type=UserFaultType.EXPECTED_AT_LEAST_ONE_ARGUMENT
),
]
NORMALIZATION_TEST_SCENARIOS: List[CommandParameterNormalizationScenario] = [
# global + command shorthand parameters with 1 longhand duplicate
CommandParameterNormalizationScenario(
command='vm show',
parameters=['-g', '--name', '-n', '--subscription', '-o'],
normalized_parameters=['--name', '--resource-group', '--subscription', '--output'],
),
# global + command shorthand parameters with 2 longhand duplicates
# global parameter prefix
CommandParameterNormalizationScenario(
command='vm create',
parameters=[
'-z', '--vmss', '--location', '-l', '--nsg', '--subnet',
'-g', '--name', '-n', '--subscription', '--out', '--ultra-ssd',
'-h'
],
normalized_parameters=[
'--zone', '--vmss', '--location', '--nsg', '--subnet', '--name',
'--resource-group', '--subscription', '--output', '--ultra-ssd-enabled',
'--help'
]
),
# command group + global parameter fallback
CommandParameterNormalizationScenario(
command='account',
add_global_parameters=True
),
# command shorthand parameter
CommandParameterNormalizationScenario(
command='account set',
parameters='-s',
normalized_parameters='--subscription'
),
# no parameters
CommandParameterNormalizationScenario(
command='account set'
),
# global parameter prefixes + duplicate parameters
CommandParameterNormalizationScenario(
command='account list',
parameters=['--out', '--query', '--all', '--all'],
normalized_parameters=['--output', '--query', '--all']
),
# invalid parameters for command
CommandParameterNormalizationScenario(
command='extension list',
parameters=['--foo', '--bar'],
normalized_parameters=''
),
# invalid parameter for command + global parameters
CommandParameterNormalizationScenario(
command='ai-did-you-mean-this version',
parameters=['--baz'],
add_global_parameters=True
),
# global parameters
CommandParameterNormalizationScenario(
command='kusto cluster create',
add_global_parameters=True
),
# parameter shorthand + prefixes
CommandParameterNormalizationScenario(
command='group create',
parameters=['-l', '-n', '--manag', '--tag', '--s'],
normalized_parameters=['--location', '--resource-group', '--managed-by', '--tags', '--subscription']
),
# invalid command and invalid parameters
CommandParameterNormalizationScenario(
CommandNormalizationScenario('Lorem ipsum.', 'Lorem'),
parameters=['--foo', '--baz']
),
# invalid (empty) command and no parameters
CommandParameterNormalizationScenario(
command=''
)
]
| 38.863014
| 108
| 0.635883
|
352c08b882848d80f4bb133a4415633417336daa
| 1,649
|
py
|
Python
|
Performance.py
|
evan-mcginnis/weeds
|
dabda454622d021be3aa92306d6d796e4388d080
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
Performance.py
|
evan-mcginnis/weeds
|
dabda454622d021be3aa92306d6d796e4388d080
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
Performance.py
|
evan-mcginnis/weeds
|
dabda454622d021be3aa92306d6d796e4388d080
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# P E R F O R M A N C E
#
from datetime import datetime
import constants as constants
class Performance:
def __init__(self, performanceFile: str):
self.times = {}
self._performanceFile = performanceFile
    def initialize(self) -> tuple:
        """
        Initialize the performance data file, truncating it to 0 bytes.
        This will also insert the headers for the data.
        :return: (bool, str) success flag and a diagnostics message
        """
diagnostics = "Performance initialized"
try:
file = open(self._performanceFile, "w")
# clear out any data that is there
file.truncate(0)
# Write out the headers for the performance data
file.write("{},{}\n".format(constants.PERF_TITLE_ACTIVITY, constants.PERF_TITLE_MILLISECONDS))
file.close()
except PermissionError:
diagnostics = "Unable to open: {}\n".format(self._performanceFile)
return False, diagnostics
return True, diagnostics
    def start(self) -> datetime:
"""
Start the performance timer
:return:
The current time
"""
self._start = datetime.now()
return self._start
    def stop(self) -> float:
self._elapsed = datetime.now() - self._start
self._elapsed_milliseconds = self._elapsed.total_seconds() * 1000
return self._elapsed_milliseconds
def stopAndRecord(self, name : str):
self.stop()
with open(self._performanceFile,"a") as self._file:
self._file.write("%s,%s\n" % (name, str(self._elapsed_milliseconds)))
def cleanup(self):
self._file.close()
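# Hedged usage sketch (editorial addition, not part of the original module): the
# CSV path and the activity name below are illustrative assumptions, and the
# `constants` module is expected to come from the same repository.
if __name__ == "__main__":
    perf = Performance("performance.csv")            # hypothetical output path
    ok, diagnostics = perf.initialize()
    if ok:
        perf.start()
        total = sum(i * i for i in range(100_000))   # some work to be timed
        perf.stopAndRecord("square-sum loop")
    else:
        print(diagnostics)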
| 31.711538
| 106
| 0.607641
|
bb6c9cee4e94d9ad19f89e72ccdce18bb4b4b01d
| 2,160
|
py
|
Python
|
tests/nantest.py
|
fgregg/fastcluster
|
8bd94197b782b9c86469a665e889cf907b90ebc3
|
[
"BSD-2-Clause"
] | 92
|
2016-03-12T09:27:31.000Z
|
2022-03-20T23:48:45.000Z
|
tests/nantest.py
|
fgregg/fastcluster
|
8bd94197b782b9c86469a665e889cf907b90ebc3
|
[
"BSD-2-Clause"
] | 27
|
2016-04-28T04:51:51.000Z
|
2022-02-27T13:50:29.000Z
|
tests/nantest.py
|
fgregg/fastcluster
|
8bd94197b782b9c86469a665e889cf907b90ebc3
|
[
"BSD-2-Clause"
] | 29
|
2016-03-16T14:18:26.000Z
|
2022-03-30T23:26:34.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Test whether the fastcluster package correctly recognizes NaN values
and raises a FloatingPointError.'''
print('''
Test program for the 'fastcluster' package.
Copyright:
* Until package version 1.1.23: (c) 2011 Daniel Müllner <http://danifold.net>
* All changes from version 1.1.24 on: (c) Google Inc. <http://google.com>''')
import numpy as np
import fastcluster
version = '1.2.4'
if fastcluster.__version__ != version:
raise ValueError('Wrong module version: {} instead of {}.'.format(fastcluster.__version__, version))
import atexit
def print_seed():
print("Seed: {0}".format(seed))
atexit.register(print_seed)
seed = np.random.randint(0,1e9)
np.random.seed(seed)
def test():
n = np.random.randint(2,100)
# Part 1: distance matrix input
N = n*(n-1)//2
D = np.random.rand(N)
# Insert a single NaN value
pos = np.random.randint(N)
D[pos] = np.nan
for method in ['single', 'complete', 'average', 'weighted', 'ward',
'centroid', 'median']:
try:
fastcluster.linkage(D, method=method)
raise AssertionError('fastcluster did not detect a NaN value!')
except FloatingPointError:
pass
# Next: the original array does not contain a NaN, but a NaN occurs
# as an updated distance.
for method in ['average', 'weighted', 'ward', 'centroid', 'median']:
try:
fastcluster.linkage([np.inf,-np.inf,-np.inf], method=method)
raise AssertionError('fastcluster did not detect a NaN value!')
except FloatingPointError:
pass
# Part 2: vector input
dim = np.random.randint(2,13)
X = np.random.rand(n,dim)
pos = (np.random.randint(n), np.random.randint(dim))
# Insert a single NaN coordinate
X[pos] = np.nan
for method in ['single', 'ward', 'centroid', 'median']:
try:
fastcluster.linkage_vector(X, method=method)
raise AssertionError('fastcluster did not detect a NaN value!')
except FloatingPointError:
pass
if __name__ == "__main__":
test()
print('OK.')
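# Hedged companion sketch (editorial addition, not part of the original test): for
# contrast with the NaN checks above, a well-formed condensed distance matrix
# clusters without raising FloatingPointError.
def _demo_clean_linkage(n=10):
    D_clean = np.random.rand(n * (n - 1) // 2)     # condensed distances, no NaNs
    Z = fastcluster.linkage(D_clean, method='average')
    assert Z.shape == (n - 1, 4)                   # SciPy-style linkage matrix
    return Z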
| 30
| 104
| 0.63287
|
7b6ab69dc0bd9125e287851780af345a7ca10a30
| 19,402
|
py
|
Python
|
analysis/utils.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | null | null | null |
analysis/utils.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | 3
|
2021-07-15T13:23:28.000Z
|
2021-12-09T03:32:16.000Z
|
analysis/utils.py
|
bizeasy17/investtrack
|
3840948896573f3906a5df80ea80859a492f4133
|
[
"MIT"
] | 1
|
2021-08-19T14:42:59.000Z
|
2021-08-19T14:42:59.000Z
|
from datetime import date, datetime, timedelta
import numpy as np
import pandas as pd
# from investors.models import TradeStrategy
import tushare as ts
from dashboard.utils import days_between
from django.utils import timezone
from .models import (AnalysisEventLog, StockHistoryDaily, StockStrategyTestLog,
StrategyTargetPctTestQuantiles,
StrategyUpDownTestQuantiles, TradeStrategyStat, StockIndexHistory)
strategy_dict = {'jiuzhuan_bs': {'jiuzhuan_count_b', 'jiuzhuan_count_s'}, 'dingdi': {'dingbu_s', 'dibu_b'},
'tupo_yali_b': {'tupo_b'}, 'diepo_zhicheng_s': {'diepo_s'}, 'wm_dingdi_bs': {'m_ding', 'w_di'},
'junxian25_bs': {'ma25_zhicheng', 'ma25_diepo', 'ma25_yali', 'ma25_tupo'},
'junxian60_bs': {'ma60_zhicheng', 'ma60_diepo', 'ma60_yali', 'ma60_tupo'},
'junxian200_bs': {'ma200_zhicheng', 'ma200_diepo', 'ma200_yali', 'ma200_tupo', }}
def get_market_code(ts_code):
indexes = {'6': '000001.SH', '0': '399001.SZ',
'3': '399006.SZ', '688': '000688.SH'}
try:
if ts_code[0] == '3':
return indexes['3']
elif ts_code[0] == '0':
return indexes['0']
else:
if ts_code[:3] == '688':
return indexes['688']
else:
return indexes['6']
except Exception as err:
print(err)
# X-Forwarded-For (XFF for short) carries the real client IP of an HTTP request; it is only added when the request passes through an HTTP proxy or a load balancer.
def get_ip(request):
    '''Get the IP address of the requester.'''
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')  # check whether a proxy was used
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]  # behind a proxy: take the real client IP
    else:
        ip = request.META.get('REMOTE_ADDR')  # no proxy: use the remote address directly
return ip
def log_test_status(ts_code, event, freq, strategy_list=[]):
for strategy in strategy_list:
try:
mark_log = StockStrategyTestLog.objects.get(
ts_code=ts_code, analysis_code=strategy, event_type=event, freq=freq, is_done=False)
mark_log.is_done = True
except Exception as e:
# print(e)
mark_log = StockStrategyTestLog(ts_code=ts_code,
analysis_code=strategy,
event_type=event, freq=freq, is_done=True)
# try:
# strategy_stat = TradeStrategyStat.objects.get(applied_period=freq, code=strategy)
# strategy_stat.hist_analyzed = True
# strategy_stat.save()
# except Exception as e:
# print(e)
mark_log.save()
def ready2proceed(strategy_code, freq='D'):
exec_date = date.today()
evt_dl_status = get_event_status(
'HIST_DOWNLOAD', exec_date=exec_date, freq=freq)
evt_mk_status = get_event_status(
'MARK_CP', exec_date=exec_date, strategy_code=strategy_code, freq=freq)
if evt_dl_status == 0:
print("previous downloading is still ongoing")
return False
elif evt_dl_status == -1:
print("history has not yet been downloaded today")
return False
else:
if evt_mk_status == 0:
print("previous marking is still ongoing")
return False
elif evt_mk_status == 1:
print("marking has been done today")
return False
return True
def is_hist_downloaded(freq='D'):
exec_date = date.today()
evt_dl_status = get_event_status(
'HIST_DOWNLOAD', exec_date=exec_date, freq=freq)
if evt_dl_status == 0:
print("previous downloading is still ongoing")
return False
elif evt_dl_status == -1:
print("history has not yet been downloaded today")
return False
return True
def get_dict_key(dict, value):
for (k, v) in dict.items():
if value in v:
return k
def ready2btest(ts_code, event, strategy_code, start_date, end_date, freq='D'):
exec_date = date.today()
completed = is_task_completed(
ts_code, event, strategy_code=get_dict_key(strategy_dict, strategy_code) if event == 'MARK_CP' else strategy_code, start_date=start_date, end_date=end_date, freq=freq)
if completed:
print('previous '+event+' is completed')
return True
print('previous '+event+' is still ongoing/ not exist')
return False
def set_task_completed(ts_code, event, freq, strategy_code, start_date, end_date):
try:
task = StockStrategyTestLog.objects.get(
ts_code=ts_code, analysis_code=strategy_code, event_type=event, freq=freq, start_date=start_date, end_date=end_date, is_done=False)
task.is_done = True
task.last_mod_time = datetime.now(timezone.utc)
task.save()
except Exception as e:
print(e)
def generate_task(listed_company, start_date, end_date, freq='D', ):
threshold_hist_fx = 365 * 3
threshold_hist_25 = 25
threshold_hist_60 = 60
threshold_hist_200 = 200
analysis_start_date = listed_company.last_analyze_date if listed_company.last_analyze_date is not None else listed_company.list_date
mark_start_date = listed_company.list_date
analysis_event_list = ['EXP_PCT_TEST', 'PERIOD_TEST',
'TGT_PCT_QTN', 'UPDN_PCT_QTN', 'UPDN_PCT_RK', 'TGT_PCT_RK']
analysis_strategy_list = ['ma25_zhicheng', 'ma25_diepo', 'ma25_yali', 'ma25_tupo',
'ma60_zhicheng', 'ma60_diepo', 'ma60_yali', 'ma60_tupo',
'jiuzhuan_count_b', 'jiuzhuan_count_s', 'dingbu_s', 'dibu_b',
'tupo_b', 'diepo_s', 'm_ding', 'w_di',
'ma200_zhicheng', 'ma200_diepo', 'ma200_yali', 'ma200_tupo']
mark_cp_event_list = ['MARK_CP']
dl_daily_event_list = ['DAILY_DOWNLOAD']
mark_strategy_set = {}
mark_strategy_dict = {
'25': {'junxian25_bs', 'jiuzhuan_bs'},
'60': {'junxian60_bs', 'dingdi', 'tupo_yali_b', 'diepo_zhicheng_s',
'wm_dingdi_bs', },
'200': {'junxian200_bs'}
}
# analysis_hist = StockHistoryDaily.objects.filter(
# ts_code=listed_company.ts_code, trade_date__gte=analysis_start_date, trade_date__lte=end_date)
# mark_hist = StockHistoryDaily.objects.filter(
# ts_code=listed_company.ts_code, trade_date__gte=mark_start_date, trade_date__lte=end_date)
    # if hist length > 600, generate analysis events
# tasks = get_analysis_task(ts_code, event, )
for event in analysis_event_list:
for strategy in analysis_strategy_list:
try:
task = StockStrategyTestLog.objects.get(
ts_code=listed_company.ts_code, analysis_code=strategy, event_type=event, freq=freq, end_date=start_date - timedelta(days=1), is_done=False)
task.end_date = end_date
listed_company.last_analyze_date = end_date
task.save()
except Exception as e:
# print(e)
                # no run record found
if days_between(mark_start_date, end_date) >= threshold_hist_fx:
# mark_strategy_set = set.union(
# mark_strategy_dict['200'], mark_strategy_dict['60'], mark_strategy_dict['25'])
task = StockStrategyTestLog(ts_code=listed_company.ts_code,
analysis_code=strategy,
event_type=event, freq=freq, start_date=start_date, end_date=end_date)
listed_company.last_analyze_date = end_date
task.save()
# listed_company.save()
# else:
    #     # If hist length < 600 and >= 25, generate MA25 marking events
    #     # if len(hist) < 600 and len(hist) >= 25:
    #     #     pass
    #     # If hist length < 600 and >= 60, generate MA60, breakout, W/M-bottom and breakdown marking events
    #     # if len(hist) < 600 and len(hist) >= 60:
    #     #     pass
    #     # If hist length < 600 and >= 200, generate MA200 marking events
    #     # if len(hist) < 600 and len(hist) >= 200:
    #     #     pass
# if days_between(mark_start_date, end_date) >= threshold_hist_200:
# mark_strategy_set = set.union(
# mark_strategy_dict['200'], mark_strategy_dict['60'], mark_strategy_dict['25'])
# else:
# if days_between(mark_start_date, end_date) >= threshold_hist_60:
# mark_strategy_set = set.union(
# mark_strategy_dict['25'], mark_strategy_dict['60'])
# else:
# if days_between(mark_start_date, end_date) >= threshold_hist_25:
# mark_strategy_set = mark_strategy_dict['25']
mark_strategy_set = set.union(
mark_strategy_dict['200'], mark_strategy_dict['60'], mark_strategy_dict['25'])
for event in mark_cp_event_list:
for strategy in mark_strategy_set:
try:
# tasks = get_analysis_task(ts_code, event, )
task = StockStrategyTestLog.objects.get(
ts_code=listed_company.ts_code, analysis_code=strategy, event_type=event, freq=freq, end_date=start_date - timedelta(days=1), is_done=False)
                task.end_date = end_date
                task.save()
            except Exception as e:  # no run record found
                # print(e)
                # no run record found
task = StockStrategyTestLog(ts_code=listed_company.ts_code,
analysis_code=strategy,
event_type=event, freq=freq, start_date=start_date, end_date=end_date)
task.save()
for event in dl_daily_event_list:
try:
# tasks = get_analysis_task(ts_code, event, )
task = StockStrategyTestLog.objects.get(
ts_code=listed_company.ts_code, event_type=event, freq=freq, end_date=start_date - timedelta(days=1), is_done=False)
            task.end_date = end_date
            task.save()
        except Exception as e:  # no run record found
            # print(e)
            # no run record found
task = StockStrategyTestLog(ts_code=listed_company.ts_code,
event_type=event, freq=freq, start_date=start_date, end_date=end_date)
task.save()
def get_event_status(event, exec_date, strategy_code=None, freq='D'):
'''
    Return the status of the given analysis event for exec_date:
    1 = finished, 0 = still in progress, -1 = no event log record found.
'''
# event_list = ['MARK_CP', 'PERIOD_TEST', 'EXP_PCT_TEST']
try:
if strategy_code is not None: # Mark CP
event = AnalysisEventLog.objects.get(
analysis_code=strategy_code, event_type=event, freq=freq, exec_date=exec_date)
else:
event = AnalysisEventLog.objects.get(
event_type=event, freq=freq, exec_date=exec_date)
return event.status
    except Exception as e:  # no event log record found
print(e)
return -1
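# Illustrative usage sketch (editor's note, not part of the original module), showing
# how the status codes above are typically consumed; the event name mirrors the calls
# in is_hist_downloaded() and a daily frequency is assumed.
#
#     status = get_event_status('HIST_DOWNLOAD', exec_date=date.today(), freq='D')
#     if status == -1:
#         pass    # no log record yet: the download has not started today
#     elif status == 0:
#         pass    # a previous run is still in progress
#     else:
#         pass    # status == 1: the download finished today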
def init_eventlog(event, exec_date, strategy_code=None, freq='D'):
'''
    Initialise the event log for exec_date.
    1. If a log already exists (still in progress or failed), reuse it.
    2. Otherwise, create a new one.
'''
# event_list = ['MARK_CP', 'PERIOD_TEST', 'EXP_PCT_TEST']
try:
if strategy_code is not None: # Mark CP
AnalysisEventLog.objects.get(
analysis_code=strategy_code, event_type=event, freq=freq, status__in=[0, -1], exec_date=exec_date)
else:
AnalysisEventLog.objects.get(
event_type=event, freq=freq, status__in=[0, -1], exec_date=exec_date)
    except Exception as e:  # no event log record found
print(e)
eventlog = AnalysisEventLog(
analysis_code=strategy_code,
event_type=event, freq=freq, exec_date=exec_date)
eventlog.save()
def set_event_completed(event, exec_date, strategy_code=None, freq='D'):
'''
    Mark the event log for exec_date as completed (status = 1).
    Return True on success, False if no matching event log record is found.
'''
# event_list = ['MARK_CP', 'PERIOD_TEST', 'EXP_PCT_TEST']
try:
if strategy_code is not None: # Mark CP
event = AnalysisEventLog.objects.get(
analysis_code=strategy_code, event_type=event, freq=freq, exec_date=exec_date)
else:
event = AnalysisEventLog.objects.get(
event_type=event, freq=freq, exec_date=exec_date)
event.status = 1
event.last_mod_time = datetime.now(timezone.utc)
event.save()
return True
    except Exception as e:  # no event log record found
print(e)
return False
def set_event_exception(event, exec_date, strategy_code=None, freq='D'):
'''
    Mark the event log for exec_date as failed (status = -1).
'''
# event_list = ['MARK_CP', 'PERIOD_TEST', 'EXP_PCT_TEST']
try:
if strategy_code is not None: # Mark CP
event = AnalysisEventLog.objects.get(
analysis_code=strategy_code, event_type=event, freq=freq, exec_date=exec_date)
else:
event = AnalysisEventLog.objects.get(
event_type=event, freq=freq, exec_date=exec_date)
event.status = -1
event.save()
    except Exception as e:  # no event log record found
print(e)
def is_task_completed(ts_code, event, strategy_code, start_date, end_date, freq):
print(start_date)
print(end_date)
print(strategy_code)
try:
task = StockStrategyTestLog.objects.get(
ts_code=ts_code, analysis_code=strategy_code, event_type=event,
start_date=start_date, end_date=end_date, freq=freq)
return task.is_done
except Exception as e:
# print(e)
return False
def hist_downloaded(ts_code, event, freq):
    today = date.today()
    try:
        # check whether today's history download has already been logged as done
        StockStrategyTestLog.objects.get(
            ts_code=ts_code, end_date=today, event_type=event, freq=freq, is_done=True)
        return True
    except Exception as e:
        # print(e)
        return False
def last_download_date(ts_code, event, freq):
try:
        # get the most recent download record
log = StockStrategyTestLog.objects.filter(
ts_code=ts_code, event_type=event, freq=freq).order_by('-end_date')[0]
# print(log.start_date)
# print(log.end_date)
return [log.start_date, log.end_date]
except Exception as e:
print(e)
return None
def log_download_hist(ts_code, event, start_date, end_date, freq):
try:
mark_log = StockStrategyTestLog(ts_code=ts_code,
event_type=event, start_date=start_date, end_date=end_date, freq=freq, is_done=True)
mark_log.save()
except Exception as e:
print(e)
def get_analysis_task(ts_code, event, strategy_code, freq='D'):
try:
if strategy_code is not None:
task = StockStrategyTestLog.objects.filter(
ts_code=ts_code, analysis_code=strategy_code, event_type=event, freq=freq, is_done=False).order_by('start_date')
else:
task = StockStrategyTestLog.objects.filter(
ts_code=ts_code, event_type=event, freq=freq, is_done=False).order_by('start_date')
return task
except Exception as e:
# print(e)
return None
def get_trade_cal_diff(ts_code, last_trade, asset='E', exchange='SSE', period=4):
count = 0
offset = 0
# pro = ts.pro_api()
while count < period:
# df = pro.trade_cal(exchange=exchange, start_date=(last_trade -
# timedelta(days=offset+1)).strftime('%Y%m%d'), end_date=(last_trade-timedelta(days=offset+1)).strftime('%Y%m%d'))
# if df['is_open'].iloc[0] == 1:
# count += 1
try:
if asset == 'E':
StockHistoryDaily.objects.get(
ts_code=ts_code, trade_date=last_trade-timedelta(days=offset+1))
else:
StockIndexHistory.objects.get(
ts_code=ts_code, trade_date=last_trade-timedelta(days=offset+1))
count += 1
except Exception as e:
pass
offset += 1
# print(last_trade-timedelta(days=offset))
return offset
def get_closest_trade_cal(cur_date, exchange='SSE'):
# count = 0
offset = 0
pro = ts.pro_api()
while True:
df = pro.trade_cal(exchange=exchange, start_date=(cur_date -
timedelta(days=offset)).strftime('%Y%m%d'), end_date=(cur_date-timedelta(days=offset)).strftime('%Y%m%d'))
if df['is_open'].iloc[0] == 1:
return (cur_date - timedelta(days=offset))
offset += 1
# print(last_trade-timedelta(days=offset))
def get_trade_cal_by_attr(ts_code, last_trade, attr='jiuzhuan_count_b'):
hist = None
it_is = False
offset = 0
# pro = ts.pro_api()
    while not it_is:
# df = pro.trade_cal(exchange=exchange, start_date=(last_trade -
# timedelta(days=offset+1)).strftime('%Y%m%d'), end_date=(last_trade-timedelta(days=offset+1)).strftime('%Y%m%d'))
# if df['is_open'].iloc[0] == 1:
# count += 1
try:
hist = StockHistoryDaily.objects.get(
ts_code=ts_code, trade_date=last_trade-timedelta(days=offset+1))
if getattr(hist, attr) == 1:
it_is = True
except Exception as e:
pass
offset += 1
# print(last_trade-timedelta(days=offset))
return hist
def get_pct_val_from(pct_str):
pct_arr = pct_str.split('_')
pct_val = pct_arr[0][3:]
return pct_val
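# Worked example (editor's note): for a target string such as 'pct10_pos' (the exact
# format is an assumption based on the usage below), split('_')[0] yields 'pct10' and
# [3:] strips the 'pct' prefix, so the function returns '10'.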
def get_qt_updownpct(ts_code, strategy_code, period, test_type):
result_qt = []
results = StrategyUpDownTestQuantiles.objects.filter(
strategy_code=strategy_code, ts_code=ts_code, test_period=period, test_type=test_type).order_by('test_period')
for result in results:
result_qt.append(
{
'period': result.test_period,
'qt25ile': round(result.qt_10pct, 2),
'qt50ile': round(result.qt_50pct, 2),
'qt75ile': round(result.qt_75pct, 2),
'max': round(result.max_val, 2),
'min': round(result.min_val, 2),
'mean': round(result.mean_val, 2),
}
)
return result_qt
def get_qt_period_on_exppct(ts_code, strategy_code, exp_pct):
result_qt = []
results = StrategyTargetPctTestQuantiles.objects.filter(
strategy_code=strategy_code, ts_code=ts_code, target_pct=exp_pct).order_by('test_freq')
for result in results:
result_qt.append(
{
'pct': get_pct_val_from(result.target_pct) + '%',
'qt25ile': round(result.qt_10pct, 2),
'qt50ile': round(result.qt_50pct, 2),
'qt75ile': round(result.qt_75pct, 2),
'min': round(result.min_val, 2),
'mean': round(result.mean_val, 2),
}
)
return result_qt
def get_pkdays_by_year_month(year, month):
pass
| 39.275304
| 175
| 0.595145
|
69e055da869d83f3c9a25eba3b5fffcae973b40c
| 842
|
py
|
Python
|
avgamah/modules/Music/join.py
|
thenishantsapkota/Hikari-Bot
|
0d04d679b04fd0c17aa194a5e5a885292dc8f788
|
[
"MIT"
] | 6
|
2021-08-31T04:53:09.000Z
|
2021-09-28T15:46:21.000Z
|
avgamah/modules/Music/join.py
|
thenishantsapkota/Avgamah
|
c7f1f9a69f8a3f4c4ea53b25dbf62e272750f76c
|
[
"MIT"
] | 7
|
2021-11-03T14:58:38.000Z
|
2022-03-29T23:16:21.000Z
|
avgamah/modules/Music/join.py
|
thenishantsapkota/Avgamah
|
c7f1f9a69f8a3f4c4ea53b25dbf62e272750f76c
|
[
"MIT"
] | 1
|
2021-08-31T08:04:51.000Z
|
2021-08-31T08:04:51.000Z
|
import hikari
import tanjun
from avgamah.core.client import Client
from avgamah.utils.buttons import DELETE_ROW
from . import _join
join_component = tanjun.Component()
@join_component.with_slash_command
@tanjun.with_own_permission_check(
hikari.Permissions.SEND_MESSAGES
| hikari.Permissions.VIEW_CHANNEL
| hikari.Permissions.EMBED_LINKS
| hikari.Permissions.CONNECT
| hikari.Permissions.SPEAK
)
@tanjun.as_slash_command("join", "Join a voice channel of a guild")
async def join(ctx: tanjun.abc.Context) -> None:
channel_id = await _join(ctx)
if channel_id:
embed = hikari.Embed(description=f"Joined <#{channel_id}>", color=0x00FF00)
await ctx.respond(embed=embed, component=DELETE_ROW)
@tanjun.as_loader
def load_components(client: Client):
client.add_component(join_component.copy())
| 26.3125
| 83
| 0.760095
|
d6b71b9c0cf3830e722ee0894b5fafd3034fc1f8
| 2,103
|
py
|
Python
|
correction.py
|
andrew-xu-monash/UMM-Modified
|
18729dc34733c203e8cd3873fec2b9f7d0b56dba
|
[
"Apache-2.0"
] | 4
|
2021-01-12T17:46:13.000Z
|
2022-03-01T23:36:38.000Z
|
correction.py
|
Novartis/UMM-Discovery
|
18729dc34733c203e8cd3873fec2b9f7d0b56dba
|
[
"Apache-2.0"
] | null | null | null |
correction.py
|
Novartis/UMM-Discovery
|
18729dc34733c203e8cd3873fec2b9f7d0b56dba
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Novartis Institutes for BioMedical Research Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import time
import patsy
from combat import *
import anndata as ad
from sklearn.decomposition import PCA as sk_PCA
def do_batch_correction(df, embeds_cols, batch_corrections, verbose=False):
df_corr = df.copy()
end = time.time()
if batch_corrections is not None:
for corr in batch_corrections:
if corr == "TVN":
df_corr = correct_tvn(df_corr.copy(), embeds_cols, verbose)
elif corr == "COMBAT":
df_corr = correct_combat(df_corr.copy(), embeds_cols, verbose)
else:
print("Batch correction {} is not implemented".format(corr))
if verbose:
print('batch correction time: {0:.0f} s'.format(time.time() - end))
return df_corr
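# Illustrative call (editor's note, not part of the original script), assuming `df`
# holds one row per sample with embedding columns listed in `embeds_cols` plus the
# 'plate', 'compound' and 'compound_uM' columns used by the corrections below.
#
#     df_corr = do_batch_correction(df, embeds_cols,
#                                   batch_corrections=["TVN", "COMBAT"], verbose=True)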
# ------------------------------------------
# tvn
def correct_tvn(df, embeds_cols, verbose=False):
if verbose:
print('Do TVN')
dmso = df.loc[(df['compound'] == 'DMSO'), embeds_cols].to_numpy(copy=True)
p = sk_PCA(n_components=len(embeds_cols), whiten=True).fit(dmso)
df.loc[:, embeds_cols] = p.transform(df.loc[:, embeds_cols])
return df
# combat
def correct_combat(df, embeds_cols, verbose=False):
if verbose:
print('Do COMBAT')
# Expression
exp = df[embeds_cols].T
# Covariants
mod = patsy.dmatrix("~ compound + compound_uM", df, return_type="dataframe")
ebat = combat(exp, df['plate'], mod, "compound_uM")
df.loc[:, embeds_cols] = ebat.T
return df
| 30.478261
| 80
| 0.664765
|
3505516a3dfda13c70b059086a09ec43cf9cd8fe
| 11,667
|
py
|
Python
|
tests/test_anchor_target_layer.py
|
ghelloumi/CafeObjectReco
|
9491b655e47ee9a4e96e0634a7397330f81608c7
|
[
"MIT"
] | null | null | null |
tests/test_anchor_target_layer.py
|
ghelloumi/CafeObjectReco
|
9491b655e47ee9a4e96e0634a7397330f81608c7
|
[
"MIT"
] | null | null | null |
tests/test_anchor_target_layer.py
|
ghelloumi/CafeObjectReco
|
9491b655e47ee9a4e96e0634a7397330f81608c7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.faster_rcnn.anchor_target_layer import AnchorTargetLayer
import chainer
import cv2 as cv
import numpy as np
import six
import unittest
class TestAnchorTargetLayer(unittest.TestCase):
def setUp(self):
self.feat_stride = 16
self.n_channels, self.width, self.height = 16, 14, 14
self.x = np.arange(1 * self.n_channels *
self.height * self.width, dtype=np.float32)
self.x = self.x.reshape(1, self.n_channels, self.height, self.width)
self.im_info = np.array([[224, 224, 0.85]])
self.anchor_target_layer = AnchorTargetLayer(
self.feat_stride, 2 ** np.arange(1, 6))
self.height, self.width = self.x.shape[2:]
self.shifts = self.anchor_target_layer.generate_shifts(
self.width, self.height)
self.all_anchors, self.total_anchors = \
self.anchor_target_layer.generate_proposals(self.shifts)
self.inds_inside, self.anchors = self.anchor_target_layer.keep_inside(
self.all_anchors, self.im_info)
self.gt_boxes = np.array([
[10, 10, 60, 200, 0],
[50, 100, 210, 210, 1],
[160, 40, 200, 70, 2]
])
gt_canvas = np.zeros((224, 224))
for gt in self.gt_boxes:
cv.rectangle(gt_canvas, (gt[0], gt[1]), (gt[2], gt[3]), 255)
cv.imwrite('tests/gt_boxes.png', gt_canvas)
self.argmax_overlaps, self.max_overlaps, self.gt_max_overlaps, \
self.gt_argmax_overlaps = self.anchor_target_layer.calc_overlaps(
self.anchors, self.gt_boxes, self.inds_inside)
self.argmax_overlaps, self.labels = \
self.anchor_target_layer.create_labels(
self.inds_inside, self.anchors, self.gt_boxes)
def test_generate_anchors(self):
anchor_target_layer = AnchorTargetLayer()
ret = np.array([[-83., -39., 100., 56.],
[-175., -87., 192., 104.],
[-359., -183., 376., 200.],
[-55., -55., 72., 72.],
[-119., -119., 136., 136.],
[-247., -247., 264., 264.],
[-35., -79., 52., 96.],
[-79., -167., 96., 184.],
[-167., -343., 184., 360.]]) - 1
self.assertEqual(anchor_target_layer.anchors.shape, ret.shape)
np.testing.assert_array_equal(anchor_target_layer.anchors, ret)
ret = self.anchor_target_layer.anchors
min_x = ret[:, 0].min()
min_y = ret[:, 1].min()
max_x = ret[:, 2].max()
max_y = ret[:, 3].max()
canvas = np.zeros(
(int(abs(min_y) + max_y) + 1,
int(abs(min_x) + max_x) + 1), dtype=np.uint8)
ret[:, 0] -= min_x
ret[:, 2] -= min_x
ret[:, 1] -= min_y
ret[:, 3] -= min_y
for anchor in ret:
anchor = list(six.moves.map(int, anchor))
cv.rectangle(
canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
cv.imwrite('tests/anchors.png', canvas)
def test_generate_shifts(self):
for i in range(len(self.shifts)):
self.assertEqual(self.shifts[i][0], self.shifts[i][2])
self.assertEqual(self.shifts[i][1], self.shifts[i][3])
i = 0
for y in range(self.height):
for x in range(self.width):
xx = x * self.feat_stride
yy = y * self.feat_stride
self.assertEqual(len(self.shifts[i]), 4)
self.assertEqual(self.shifts[i][0], xx)
self.assertEqual(self.shifts[i][1], yy)
self.assertEqual(self.shifts[i][2], xx)
self.assertEqual(self.shifts[i][3], yy)
i += 1
self.assertEqual(i, len(self.shifts))
min_x = self.shifts[:, 0].min()
min_y = self.shifts[:, 1].min()
max_x = self.shifts[:, 2].max()
max_y = self.shifts[:, 3].max()
canvas = np.zeros(
(int(abs(min_y) + max_y) + 1,
int(abs(min_x) + max_x) + 1), dtype=np.uint8)
shifts = self.shifts.copy()
shifts[:, 0] -= min_x
shifts[:, 2] -= min_x
shifts[:, 1] -= min_y
shifts[:, 3] -= min_y
for anchor in shifts:
anchor = list(six.moves.map(int, anchor))
cv.circle(canvas, (anchor[0], anchor[1]), 1, 255, -1)
cv.imwrite('tests/shifts.png', canvas)
def test_generate_proposals(self):
self.assertEqual(self.total_anchors, len(self.shifts) *
self.anchor_target_layer.anchors.shape[0])
min_x = self.all_anchors[:, 0].min()
min_y = self.all_anchors[:, 1].min()
max_x = self.all_anchors[:, 2].max()
max_y = self.all_anchors[:, 3].max()
canvas = np.zeros(
(int(abs(min_y) + max_y) + 1,
int(abs(min_x) + max_x) + 1), dtype=np.uint8)
self.all_anchors[:, 0] -= min_x
self.all_anchors[:, 1] -= min_y
self.all_anchors[:, 2] -= min_x
self.all_anchors[:, 3] -= min_y
for anchor in self.all_anchors:
anchor = list(six.moves.map(int, anchor))
cv.rectangle(
canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
cv.imwrite('tests/all_anchors.png', canvas)
def test_keep_inside(self):
inds_inside, anchors = self.inds_inside, self.anchors
min_x = anchors[:, 0].min()
min_y = anchors[:, 1].min()
max_x = anchors[:, 2].max()
max_y = anchors[:, 3].max()
canvas = np.zeros(
(int(max_y - min_y) + 1,
int(max_x - min_x) + 1), dtype=np.uint8)
anchors[:, 0] -= min_x
anchors[:, 1] -= min_y
anchors[:, 2] -= min_x
anchors[:, 3] -= min_y
for i, anchor in enumerate(anchors):
anchor = list(six.moves.map(int, anchor))
_canvas = np.zeros(
(int(max_y - min_y) + 1,
int(max_x - min_x) + 1), dtype=np.uint8)
cv.rectangle(
_canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
cv.rectangle(
canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
cv.imwrite('tests/anchors_inside_{}.png'.format(i), _canvas)
        cv.imwrite('tests/anchors_inside.png', canvas)
def test_calc_overlaps(self):
self.assertEqual(len(self.anchors), len(self.max_overlaps))
self.assertEqual(len(self.gt_max_overlaps), len(self.gt_boxes))
self.assertEqual(len(self.gt_argmax_overlaps), len(self.gt_boxes))
canvas = np.zeros((int(self.im_info[0, 0]), int(self.im_info[0, 1])))
for bbox in self.anchors[self.gt_argmax_overlaps]:
x1, y1, x2, y2 = list(map(int, bbox))
cv.rectangle(canvas, (x1, y1), (x2, y2), 255)
cv.imwrite('tests/max_overlap_anchors.png', canvas)
def test_create_labels(self):
self.assertEqual(len(self.labels), len(self.anchors))
neg_ids = np.where(self.labels == 0)[0]
pos_ids = np.where(self.labels == 1)[0]
ignore_ids = np.where(self.labels == -1)[0]
canvas = np.zeros((int(self.im_info[0, 0]), int(self.im_info[0, 1])))
for bbox in self.anchors[pos_ids]:
x1, y1, x2, y2 = list(map(int, bbox))
cv.rectangle(canvas, (x1, y1), (x2, y2), 255)
cv.imwrite('tests/pos_labels.png', canvas)
np.testing.assert_array_less(
self.max_overlaps[neg_ids],
self.anchor_target_layer.RPN_NEGATIVE_OVERLAP)
# np.testing.assert_array_less(
# self.anchor_target_layer.RPN_POSITIVE_OVERLAP,
# self.max_overlaps[pos_ids])
def test_calc_inside_weights(self):
bbox_inside_weights = \
self.anchor_target_layer.calc_inside_weights(
self.inds_inside, self.labels)
neg_ids = np.where(self.labels == 0)[0]
pos_ids = np.where(self.labels == 1)[0]
ignore_ids = np.where(self.labels == -1)[0]
np.testing.assert_array_equal(bbox_inside_weights[pos_ids], 1.)
np.testing.assert_array_equal(bbox_inside_weights[neg_ids], 0.)
np.testing.assert_array_equal(bbox_inside_weights[ignore_ids], 0.)
def test_calc_outside_weights(self):
self.anchor_target_layer.RPN_POSITIVE_WEIGHT = -1
bbox_outside_weights = \
self.anchor_target_layer.calc_outside_weights(
self.inds_inside, self.labels)
neg_ids = np.where(self.labels == 0)[0]
pos_ids = np.where(self.labels == 1)[0]
ignore_ids = np.where(self.labels == -1)[0]
self.assertEqual(len(np.unique(bbox_outside_weights[pos_ids])), 1)
self.assertEqual(len(np.unique(bbox_outside_weights[neg_ids])), 1)
self.assertEqual(np.unique(bbox_outside_weights[pos_ids]),
np.unique(bbox_outside_weights[neg_ids]))
np.testing.assert_array_equal(
bbox_outside_weights[pos_ids], 1. / np.sum(self.labels >= 0))
np.testing.assert_array_equal(
bbox_outside_weights[neg_ids], 1. / np.sum(self.labels >= 0))
self.anchor_target_layer.RPN_POSITIVE_WEIGHT = 0.8
bbox_outside_weights = \
self.anchor_target_layer.calc_outside_weights(
self.inds_inside, self.labels)
np.testing.assert_array_equal(
bbox_outside_weights[pos_ids], 0.8 / np.sum(self.labels == 1))
np.testing.assert_array_equal(
bbox_outside_weights[neg_ids], 0.2 / np.sum(self.labels == 0))
def test_mapup_to_anchors(self):
bbox_inside_weights = \
self.anchor_target_layer.calc_inside_weights(
self.inds_inside, self.labels)
bbox_outside_weights = \
self.anchor_target_layer.calc_outside_weights(
self.inds_inside, self.labels)
bbox_targets = np.zeros((len(self.inds_inside), 4), dtype=np.float32)
bbox_targets = self.anchor_target_layer._compute_targets(
self.anchors, self.gt_boxes[self.argmax_overlaps, :])
labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = \
self.anchor_target_layer.mapup_to_anchors(
self.labels, self.total_anchors, self.inds_inside,
bbox_targets, bbox_inside_weights, bbox_outside_weights)
self.assertEqual(len(labels), len(self.all_anchors))
self.assertEqual(len(bbox_targets), len(self.all_anchors))
self.assertEqual(len(bbox_inside_weights), len(self.all_anchors))
self.assertEqual(len(bbox_outside_weights), len(self.all_anchors))
def test_call(self):
xp = chainer.cuda.cupy
x = chainer.Variable(xp.asarray(self.x, dtype=xp.float32))
gt_boxes = self.gt_boxes
im_info = self.im_info
labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = \
self.anchor_target_layer(x, gt_boxes, im_info)
n_anchors = self.anchor_target_layer.n_anchors
self.assertEqual(labels.shape,
(1, n_anchors, self.height, self.width))
self.assertEqual(bbox_targets.shape,
(1, n_anchors * 4, self.height, self.width))
self.assertEqual(bbox_inside_weights.shape,
(1, n_anchors * 4, self.height, self.width))
self.assertEqual(bbox_outside_weights.shape,
(1, n_anchors * 4, self.height, self.width))
| 44.026415
| 78
| 0.578641
|
1da73d14744303a4c20f0da078a15eb9529a6e87
| 9,967
|
py
|
Python
|
Weather-Data-Collector/Tkinter.py
|
Sachinsingh14/Python-Projects
|
1edba3574b618bc59c68a7647217a7957c604878
|
[
"Apache-2.0"
] | 1
|
2021-10-18T14:52:42.000Z
|
2021-10-18T14:52:42.000Z
|
Weather-Data-Collector/Tkinter.py
|
Sachinsingh14/Python-Projects
|
1edba3574b618bc59c68a7647217a7957c604878
|
[
"Apache-2.0"
] | 1
|
2021-10-18T15:30:50.000Z
|
2021-10-18T15:34:24.000Z
|
Weather-Data-Collector/Tkinter.py
|
Sachinsingh14/Python-Projects
|
1edba3574b618bc59c68a7647217a7957c604878
|
[
"Apache-2.0"
] | 1
|
2021-10-18T15:20:48.000Z
|
2021-10-18T15:20:48.000Z
|
# ===== imports =======
import json
from urllib.request import urlopen
from API_key import api_key
from html.parser import HTMLParser
from tkinter import scrolledtext
import urllib.request
import tkinter as tk
from tkinter import Menu
from tkinter import ttk
# ===== functions ======
# Exit GUI cleanly
def _quit():
win.quit()
win.destroy()
exit()
# ===== procedural code =====
# Create instance
win = tk.Tk()
# Add a title
win.title("Weather Conditons")
# ---------------------------------------------------------------
# Creating a Menu Bar
menuBar = Menu()
win.config(menu=menuBar)
# Add menu items
fileMenu = Menu(menuBar, tearoff=0)
fileMenu.add_command(label="New")
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=_quit)
menuBar.add_cascade(label="File", menu=fileMenu)
# Add another Menu to the Menu Bar and an item
helpMenu = Menu(menuBar, tearoff=0)
helpMenu.add_command(label="About")
menuBar.add_cascade(label="Help", menu=helpMenu)
# ---------------------------------------------------------------
# Tab Control / Notebook
tabControl = ttk.Notebook(win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab1, text='OpenWeatherMap') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# ---------------------------------------------------------------
# TAB 1 OpenWeatherMap
######################
# We are creating a container frame to hold other widgets
open_weather_cities_frame = ttk.LabelFrame(
tab1, text=' Latest Observation for ')
open_weather_cities_frame.grid(column=0, row=0, padx=8, pady=4)
# Station City label
open_location = tk.StringVar()
ttk.Label(open_weather_cities_frame, textvariable=open_location).grid(
column=0, row=1, columnspan=3)
# ---------------------------------------------------------------
# Adding a Label
ttk.Label(open_weather_cities_frame, text="City: ").grid(column=0, row=0)
# ---------------------------------------------------------------
open_city = tk.StringVar()
open_city_combo = ttk.Combobox(
open_weather_cities_frame, width=16, textvariable=open_city)
open_city_combo['values'] = (
'Los Angeles, US', 'London, UK', 'Paris, FR', 'Mumbai, IN', 'Beijing, CN')
open_city_combo.grid(column=1, row=0)
open_city_combo.current(0) # highlight first city station id
# ---------------------------------------------------------------
# callback function
def _get_station_open():
city = open_city_combo.get()
get_open_weather_data(city)
get_weather_btn = ttk.Button(open_weather_cities_frame, text='Get Weather',
command=_get_station_open).grid(column=2, row=0)
# ---------------------------------------------------------------
for child in open_weather_cities_frame.winfo_children():
child.grid_configure(padx=5, pady=2)
# ---------------------------------------------------------------
# We are creating a container frame to hold all other widgets
open_weather_conditions_frame = ttk.LabelFrame(
tab1, text=' Current Weather Conditions ')
open_weather_conditions_frame.grid(column=0, row=1, padx=8, pady=4)
ENTRY_WIDTH = 25
# Adding Label & Textbox Entry widgets
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Last Updated:").grid(
column=0, row=1, sticky='E') # <== right-align
open_updated = tk.StringVar()
open_updatedEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_updated, state='readonly')
open_updatedEntry.grid(column=1, row=1, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Weather:").grid(
column=0, row=2, sticky='E') # <== increment row for each
open_weather = tk.StringVar()
open_weatherEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_weather, state='readonly')
# <== increment row for each
open_weatherEntry.grid(column=1, row=2, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Temperature:").grid(
column=0, row=3, sticky='E')
open_temp = tk.StringVar()
open_tempEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_temp, state='readonly')
open_tempEntry.grid(column=1, row=3, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Relative Humidity:").grid(
column=0, row=5, sticky='E')
open_rel_humi = tk.StringVar()
open_rel_humiEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_rel_humi, state='readonly')
open_rel_humiEntry.grid(column=1, row=5, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Wind:").grid(
column=0, row=6, sticky='E')
open_wind = tk.StringVar()
open_windEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_wind, state='readonly')
open_windEntry.grid(column=1, row=6, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Visibility:").grid(
column=0, row=7, sticky='E')
open_visi = tk.StringVar()
open_visiEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_visi, state='readonly')
open_visiEntry.grid(column=1, row=7, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Pressure:").grid(
column=0, row=8, sticky='E')
open_msl = tk.StringVar()
open_mslEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=open_msl, state='readonly')
open_mslEntry.grid(column=1, row=8, sticky='W')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Sunrise:").grid(
column=0, row=9, sticky='E')
sunrise = tk.StringVar()
sunriseEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=sunrise, state='readonly')
sunriseEntry.grid(column=1, row=9, sticky='E')
# ---------------------------------------------
ttk.Label(open_weather_conditions_frame, text="Sunset:").grid(
column=0, row=10, sticky='E')
sunset = tk.StringVar()
sunsetEntry = ttk.Entry(open_weather_conditions_frame,
width=ENTRY_WIDTH, textvariable=sunset, state='readonly')
sunsetEntry.grid(column=1, row=10, sticky='E')
# ---------------------------------------------
# Add some space around each widget
for child in open_weather_conditions_frame.winfo_children():
child.grid_configure(padx=4, pady=2)
# =============== OpenWeatherMap Data collection ===========
def get_open_weather_data(city='London,uk'):
city = city.replace(' ', '%20')
url = "http://api.openweathermap.org/data/2.5/weather?q={}&appid={}".format(
city, api_key)
response = urlopen(url)
data = response.read().decode()
json_data = json.loads(data)
from pprint import pprint
pprint(json_data)
lat_long = json_data['coord']
lastupdate_unix = json_data['dt']
city_id = json_data['id']
humidity = json_data['main']['humidity']
pressure = json_data['main']['pressure']
temp_kelvin = json_data['main']['temp']
city_name = json_data['name']
city_country = json_data['sys']['country']
sunrise_unix = json_data['sys']['sunrise']
sunset_unix = json_data['sys']['sunset']
try:
visibility_meter = json_data['visibility']
    except KeyError:
visibility_meter = 'N/A'
owm_weather = json_data['weather'][0]['description']
weather_icon = json_data['weather'][0]['icon']
wind_deg = json_data['wind']['deg']
wind_speed_meter_sec = json_data['wind']['speed']
def kelvin_to_celsius(temp_k):
return "{:.1f}".format(temp_k - 273.15)
def kelvin_to_fahrenheit(temp_k):
return "{:.1f}".format((temp_k - 273.15) * 1.8000 + 32.00)
from datetime import datetime
def unix_to_datetime(unix_time):
return datetime.fromtimestamp(int(unix_time)
).strftime('%Y-%m-%d %H:%M:%S')
def meter_to_miles(meter):
return "{:.2f}".format((meter * 0.00062137))
    if visibility_meter == 'N/A':
visibility_miles = 'N/A'
else:
visibility_miles = meter_to_miles(visibility_meter)
def mps_to_mph(meter_second):
return "{:.1f}".format((meter_second * (2.23693629)))
# -------------------------------------------------------
# Update GUI entry widgets with live data
open_location.set('{}, {}'.format(city_name, city_country))
lastupdate = unix_to_datetime(lastupdate_unix)
open_updated.set(lastupdate)
open_weather.set(owm_weather)
temp_fahr = kelvin_to_fahrenheit(temp_kelvin)
temp_cels = kelvin_to_celsius(temp_kelvin)
open_temp.set('{} \xb0F ({} \xb0C)'.format(temp_fahr, temp_cels))
open_rel_humi.set('{} %'.format(humidity))
wind_speed_mph = mps_to_mph(wind_speed_meter_sec)
open_wind.set('{} degrees at {} MPH'.format(wind_deg, wind_speed_mph))
open_visi.set('{} miles'.format(visibility_miles))
open_msl.set('{} hPa'.format(pressure))
sunrise_dt = unix_to_datetime(sunrise_unix)
sunrise.set(sunrise_dt)
sunset_dt = unix_to_datetime(sunset_unix)
sunset.set(sunset_dt)
# win.update() # required or we won't see the icon
# =========== Start GUI ===================
win.mainloop()
| 37.897338
| 96
| 0.599177
|
d66554af8629ef3cd88b52450bfaad1045183c08
| 435
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/carpet/aaxis/_minorgridcolor.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/Lib/site-packages/plotly/validators/carpet/aaxis/_minorgridcolor.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/Lib/site-packages/plotly/validators/carpet/aaxis/_minorgridcolor.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class MinorgridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="minorgridcolor", parent_name="carpet.aaxis", **kwargs
):
super(MinorgridcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
| 31.071429
| 80
| 0.668966
|
d3eeec9d7bec1aae5e165f2a91bbfc7244d924f5
| 1,560
|
py
|
Python
|
yatube/posts/views.py
|
an10nimus/yatube
|
2745953551f0f9b1b62d8194511cdfc567b31276
|
[
"MIT"
] | null | null | null |
yatube/posts/views.py
|
an10nimus/yatube
|
2745953551f0f9b1b62d8194511cdfc567b31276
|
[
"MIT"
] | 5
|
2021-03-19T11:47:40.000Z
|
2022-02-10T12:39:10.000Z
|
yatube/posts/views.py
|
an10nimus/yatube
|
2745953551f0f9b1b62d8194511cdfc567b31276
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from .models import Post, Group
from .forms import NewPostForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator
User = get_user_model()
def index(request):
post_list = Post.objects.order_by('-pub_date').all()
paginator = Paginator(post_list, 10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(
request,
'index.html',
{'page': page, 'paginator': paginator}
)
def group_posts(request, slug):
group = get_object_or_404(Group, slug=slug)
posts = group.posts.all()
paginator = Paginator(posts, 10)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(
request,
'group.html',
{'group': group, 'page': page}
)
@login_required
def new_post(request):
if request.method == 'POST':
form = NewPostForm(request.POST)
if form.is_valid():
new = form.save(commit=False)
new.author = request.user
new.save()
return redirect('index')
return render(request, 'new_post.html', {'form': form})
form = NewPostForm()
return render(request, 'new_post.html', {'form': form})
def profile(request):
user = User.objects.all()
return render(request, 'profile.html', {})
def post_view(request):
return render(request, 'post.html', {})
| 26.896552
| 64
| 0.65641
|
c5b8755b408df29d479226c25ff3f2134ab422fb
| 1,587
|
py
|
Python
|
aliyun-python-sdk-gts-phd/aliyunsdkgts_phd/request/v20200228/ListApiMsgRecordsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-gts-phd/aliyunsdkgts_phd/request/v20200228/ListApiMsgRecordsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-gts-phd/aliyunsdkgts_phd/request/v20200228/ListApiMsgRecordsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListApiMsgRecordsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'gts-phd', '2020-02-28', 'ListApiMsgRecords')
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_MsgId(self):
return self.get_query_params().get('MsgId')
def set_MsgId(self,MsgId):
self.add_query_param('MsgId',MsgId)
def get_PageIndex(self):
return self.get_query_params().get('PageIndex')
def set_PageIndex(self,PageIndex):
self.add_query_param('PageIndex',PageIndex)
def get_State(self):
return self.get_query_params().get('State')
def set_State(self,State):
self.add_query_param('State',State)
| 32.387755
| 74
| 0.753623
|
392e3aa98023b7812c8a1baa91cb63eef3928a9e
| 1,312
|
py
|
Python
|
test/frcnn_test/util_test/test_bbox.py
|
shtamura/maskrcnn
|
bc4d599efe414f03dc4f73f979ac056487997c35
|
[
"MIT"
] | 21
|
2018-06-18T02:29:48.000Z
|
2021-11-08T09:31:31.000Z
|
test/frcnn_test/util_test/test_bbox.py
|
shtamura/maskrcnn
|
bc4d599efe414f03dc4f73f979ac056487997c35
|
[
"MIT"
] | null | null | null |
test/frcnn_test/util_test/test_bbox.py
|
shtamura/maskrcnn
|
bc4d599efe414f03dc4f73f979ac056487997c35
|
[
"MIT"
] | 7
|
2018-07-06T07:59:53.000Z
|
2019-01-10T05:30:18.000Z
|
import unittest
from keras import backend as K
import numpy as np
import xrcnn.util.bbox as bbox
from test.generate_random_bbox import generate_random_bbox
class TestBbox(unittest.TestCase):
def setUp(self):
self.src_bbox = generate_random_bbox(8, (64, 32), 4, 16)
self.dst_bbox = self.src_bbox + 1
def test_restore_bbox(self):
offset = bbox.get_offset(self.src_bbox, self.dst_bbox)
out_raw_bbox = bbox.get_bbox(self.src_bbox, offset)
np.testing.assert_almost_equal(
K.get_value(out_raw_bbox), K.get_value(self.dst_bbox), decimal=5)
def test_get_iou(self):
gtbox = K.variable([[1, 1, 3, 3], [2, 2, 4, 4]])
anchor = K.variable([
            [1, 1, 3, 3],  # exactly matches gtbox[0], so IoU = 1;
            # overlaps 1/4 of gtbox[1], so IoU = 1/7.
            [1, 0, 3, 2],  # overlaps half of gtbox[0], so IoU = 1/3.
            [2, 2, 4, 4],  # overlaps 1/4 of gtbox[0], so IoU = 1/7; matches gtbox[1] exactly.
            [0, 3, 2, 5],  # adjacent to gtbox[0], no overlap.
            [4, 3, 6, 5],  # no contact with gtbox[0].
])
expected = np.array([
[1, 1 / 7],
[1 / 3, 0],
[1 / 7, 1],
[0, 0],
[0, 0],
])
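        # Worked example for the 1/7 entries (editor's note, not in the original test):
        # anchor [1, 1, 3, 3] and gtbox [2, 2, 4, 4] are both 2x2 boxes sharing a 1x1
        # corner, so intersection = 1, union = 4 + 4 - 1 = 7, and IoU = 1/7.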
iou = K.get_value(bbox.get_iou(anchor, gtbox))
np.testing.assert_almost_equal(iou, expected, decimal=5)
| 32.8
| 77
| 0.556402
|
2127ca0b8fc41c29d00d12e8db7a8a626a02ce66
| 866
|
py
|
Python
|
ladim/utilities.py
|
pnsaevik/ladim
|
967728b291631dd5a83cb0ee041770a0a9b08313
|
[
"MIT"
] | null | null | null |
ladim/utilities.py
|
pnsaevik/ladim
|
967728b291631dd5a83cb0ee041770a0a9b08313
|
[
"MIT"
] | null | null | null |
ladim/utilities.py
|
pnsaevik/ladim
|
967728b291631dd5a83cb0ee041770a0a9b08313
|
[
"MIT"
] | null | null | null |
"""
General utilities for LADiM
"""
from typing import Any, Dict, List
import numpy as np
def timestep2stamp(config: Dict[str, Any], n: int) -> np.datetime64:
"""Convert from time step number to timestamp"""
timestamp = config["start_time"] + n * np.timedelta64(config["dt"], "s")
return timestamp
def timestamp2step(config: Dict[str, Any], timestamp: np.datetime64) -> int:
"""Convert from timestamp to time step number"""
# mtime = np.datetime64(timestamp)
dtime = np.timedelta64(timestamp - config["start_time"], "s").astype(int)
step = dtime // config["dt"]
return step
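# Illustrative round trip (editor's note, not part of the original module), assuming a
# config such as {"start_time": np.datetime64("2020-01-01"), "dt": 600}: step 6 maps to
# 2020-01-01T01:00:00 via timestep2stamp(), and timestamp2step() maps it back to 6.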
# Utility function to test for position in grid
def ingrid(x: float, y: float, subgrid: List[int]) -> bool:
"""Check if position (x, y) is in a subgrid"""
i0, i1, j0, j1 = subgrid
return (i0 <= x) & (x <= i1 - 1) & (j0 <= y) & (y <= j1 - 1)
| 30.928571
| 77
| 0.639723
|
097570eacf18a6e65ded003aa95fdeebeb0c5f6e
| 1,068
|
py
|
Python
|
enstools/io/__init__.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | 5
|
2021-12-16T14:08:00.000Z
|
2022-03-02T14:08:10.000Z
|
enstools/io/__init__.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | null | null | null |
enstools/io/__init__.py
|
wavestoweather/enstools
|
d0f612b0187b0ad54dfbbb78aa678564f46eaedf
|
[
"Apache-2.0"
] | null | null | null |
"""
Reading and Writing of meteorological data
"""
def __clean_HDF5_PLUGIN_PATH():
"""
if the libraries from hdf5plugin are in HDF5_PLUGIN_PATH, then remove them
"""
import os
import logging
if "HDF5_PLUGIN_PATH" in os.environ:
paths = os.environ["HDF5_PLUGIN_PATH"].split(":")
keep = []
for one_path in paths:
if len(one_path) == 0:
continue
if 'h5z-sz' not in one_path:
logging.info(f"removed {one_path} from HDF5_PLUGIN_PATH")
continue
keep.append(one_path)
if len(keep) > 0:
os.environ["HDF5_PLUGIN_PATH"] = ":".join(keep)
else:
del os.environ["HDF5_PLUGIN_PATH"]
# TODO: figure out why this is needed and remove it!
__clean_HDF5_PLUGIN_PATH()
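# Illustrative effect (editor's note, not part of the original module): with
# HDF5_PLUGIN_PATH="/opt/hdf5plugin/plugins:/opt/h5z-sz/lib" (placeholder paths), only
# the h5z-sz entry survives the cleanup above; if no entry matches, the variable is
# removed entirely.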
from .file_type import get_file_type
from .reader import read
from .writer import write
from .dataset import drop_unused
from .compressor import compress
from .analyzer import analyze
from .cli import main as cli
from .evaluator import evaluate
| 28.864865
| 78
| 0.646067
|
8b6d7ccb4c9c131d5335fd6a137d4ace33ecef96
| 7,114
|
py
|
Python
|
tensorflow/lite/python/lite_v2_test_util.py
|
Van-an/tensorflow
|
322463c34a2fff12c8a8fd47b0ae99d7e1de1734
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/lite/python/lite_v2_test_util.py
|
govl-psb/tensorflow-1
|
60028072a1c3b4376e145b6fea8e4ccd3324377f
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/lite/python/lite_v2_test_util.py
|
govl-psb/tensorflow-1
|
60028072a1c3b4376e145b6fea8e4ccd3324377f
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
import os
from absl.testing import parameterized
import numpy as np
from six.moves import zip
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
class ModelTest(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Base test class for TensorFlow Lite 2.x model tests."""
def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
"""Evaluates the model on the `input_data`.
Args:
tflite_model: TensorFlow Lite model.
input_data: List of EagerTensor const ops containing the input data for
each input tensor.
input_shapes: List of tuples representing the `shape_signature` and the
new shape of each input tensor that has unknown dimensions.
Returns:
[np.ndarray]
"""
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
if input_shapes:
for idx, (shape_signature, final_shape) in enumerate(input_shapes):
self.assertTrue(
(input_details[idx]['shape_signature'] == shape_signature).all())
index = input_details[idx]['index']
interpreter.resize_tensor_input(index, final_shape, strict=True)
interpreter.allocate_tensors()
output_details = interpreter.get_output_details()
input_details = interpreter.get_input_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return [
interpreter.get_tensor(details['index']) for details in output_details
]
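  # Illustrative call (editor's note, not part of the original helper) from a subclass
  # test, assuming a converted model with a single float32 input of shape [1]:
  #
  #     input_data = constant_op.constant(1., shape=[1])
  #     [result] = self._evaluateTFLiteModel(tflite_model, [input_data])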
def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key,
inputs):
"""Evaluates the model on the `inputs`.
Args:
tflite_model: TensorFlow Lite model.
signature_key: Signature key.
inputs: Map from input tensor names in the SignatureDef to tensor value.
Returns:
Dictionary of outputs.
Key is the output name in the SignatureDef 'signature_key'
Value is the output value
"""
interpreter = Interpreter(model_content=tflite_model)
signature_runner = interpreter.get_signature_runner(signature_key)
return signature_runner(**inputs)
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
def _getSimpleModelWithVariables(self):
class SimpleModelWithOneVariable(tracking.AutoTrackable):
"""Basic model with 1 variable."""
def __init__(self):
super(SimpleModelWithOneVariable, self).__init__()
self.var = variables.Variable(array_ops.zeros((1, 10), name='var'))
@def_function.function
def assign_add(self, x):
self.var.assign_add(x)
return self.var
return SimpleModelWithOneVariable()
def _getMultiFunctionModel(self):
class BasicModel(tracking.AutoTrackable):
"""Basic model with multiple functions."""
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
@def_function.function
def mul_add(self, x, y):
if self.z is None:
self.z = variables.Variable(3.)
return x * self.z + y
return BasicModel()
def _getMultiFunctionModelWithSharedWeight(self):
class BasicModelWithSharedWeight(tracking.AutoTrackable):
"""Model with multiple functions and a shared weight."""
def __init__(self):
self.weight = constant_op.constant([1.0],
shape=(1, 512, 512, 1),
dtype=dtypes.float32)
@def_function.function
def add(self, x):
return x + self.weight
@def_function.function
def sub(self, x):
return x - self.weight
@def_function.function
def mul(self, x):
return x * self.weight
return BasicModelWithSharedWeight()
def _getMatMulModelWithSmallWeights(self):
class MatMulModelWithSmallWeights(tracking.AutoTrackable):
"""MatMul model with small weights and relatively large biases."""
def __init__(self):
self.weight = constant_op.constant([[1e-3, -1e-3], [-2e-4, 2e-4]],
shape=(2, 2),
dtype=dtypes.float32)
self.bias = constant_op.constant([1.28, 2.55],
shape=(2,),
dtype=dtypes.float32)
@def_function.function
def matmul(self, x):
return x @ self.weight + self.bias
return MatMulModelWithSmallWeights()
def _getSqrtModel(self):
"""Returns a model with only one sqrt op, to test non-quantizable op."""
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(1, 10), dtype=dtypes.float32)
])
def sqrt(x):
return math_ops.sqrt(x)
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(0, 16, size=(1, 10)).astype(np.float32)]
return sqrt, calibration_gen
def _assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
# To make the test independent on how the nodes are created, we only assert
# the name of this test file.
self.assertIn('lite_v2_test.py', file_names)
self.assertNotIn('lite_test.py', file_names)
| 34.038278
| 80
| 0.666151
|
06d7720a490e461fd2ff0b383030409b8ea2fefd
| 5,568
|
py
|
Python
|
gfx/py/residualInterpolation.py
|
valentjn/defense
|
a0ce0be1fb1832af919f804c51febfc248aefc47
|
[
"CC0-1.0"
] | 1
|
2021-01-15T16:34:33.000Z
|
2021-01-15T16:34:33.000Z
|
gfx/py/residualInterpolation.py
|
valentjn/defense
|
a0ce0be1fb1832af919f804c51febfc248aefc47
|
[
"CC0-1.0"
] | null | null | null |
gfx/py/residualInterpolation.py
|
valentjn/defense
|
a0ce0be1fb1832af919f804c51febfc248aefc47
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python3
# number of output figures = 1
import matplotlib.patches as patches
import numpy as np
from helper.figure import Figure
import helper.grid
import helper.plot
def plotNodalSpace(X, l, ax, pos, size, K=None, KColor="r", notKColor="b",
textColor="k", borderColor="k"):
xSquare = np.array([0, 1, 1, 0, 0])
ySquare = np.array([0, 0, 1, 1, 0])
s = lambda x, y: (pos[0] + size * np.array(x), pos[1] + size * np.array(y))
ax.plot(*s(xSquare, ySquare), "-", color=borderColor,
clip_on=False, zorder=-2)
if textColor is not None:
ax.text(*s(0.5, 0.02), r"$\ns{{({},{})}}$".format(*l),
color=textColor, ha="center", va="bottom")
N = X.shape[0]
if K is None: K = np.zeros((N,), dtype=bool)
ax.plot(*s(X[K,0], X[K,1]), ".", clip_on=False, color=KColor, zorder=-1)
K = np.logical_not(K)
ax.plot(*s(X[K,0], X[K,1]), ".", clip_on=False, color=notKColor, zorder=-1)
def plotSG(n, d, b, ax, pos, size):
xSquare = np.array([0, 1, 1, 0, 0])
ySquare = np.array([0, 0, 1, 1, 0])
s = lambda x, y: (pos[0] + size * np.array(x), pos[1] + size * np.array(y))
ax.plot(*s(xSquare, ySquare), "k-", clip_on=False)
ax.text(*s(0.5, 0.03), r"$\regsgspace{n}{d}$",
ha="center", va="bottom")
grid = helper.grid.RegularSparseBoundary(n, d, b)
X, L, I = grid.generate()
ax.plot(*s(X[:,0], X[:,1]), "k.", clip_on=False)
def main():
n = 3
d = 2
b = 0
subspaceSize = 1
subspaceMargin = 0.2
sgSize = 1.5
arrowMargin = 0.1
startEndArrowLength = 0.5
scaleArrowHead = 2.5
L = [(i, n-i) for i in range(n+1)]
brightness = 0.4
schemeSize = (n + 1) * (subspaceSize + subspaceMargin) - subspaceMargin
xOffsetGlobal = 0
yOffsetGlobal = schemeSize
hiddenColor = 3*[0.7]
Xs = [helper.grid.getCoordinates(l, helper.grid.getNodalIndices(l))
for l in L]
fig = Figure.create(figsize=(5, 3), scale=1.1, facecolor="none")
ax = fig.gca()
for l0 in range(n+1):
for l1 in range(n+1):
l = (l0, l1)
X = helper.grid.getCoordinates(l, helper.grid.getNodalIndices(l))
xOffset = (xOffsetGlobal +
l[0] * (subspaceSize + subspaceMargin))
yOffset = (yOffsetGlobal -
l[1] * (subspaceSize + subspaceMargin) - subspaceSize)
if l not in L:
plotNodalSpace(X, l, ax, (xOffset, yOffset), subspaceSize,
notKColor=hiddenColor, borderColor=hiddenColor,
textColor=None)
else:
q = L.index(l)
XUnion = (np.unique(np.vstack(Xs[q+1:]), axis=0)
if q < len(L) - 1 else np.zeros((0, d)))
N = X.shape[0]
K = np.zeros((N,), dtype=bool)
for k in range(N): K[k] = not (XUnion == X[k,:]).all(axis=1).any()
plotNodalSpace(X, l, ax, (xOffset, yOffset), subspaceSize,
K=K, KColor="C0", notKColor="C1")
if q == 0:
arrowEnd = (xOffset - arrowMargin, yOffset + subspaceSize / 2)
arrowStart = (arrowEnd[0] - startEndArrowLength, arrowEnd[1])
helper.plot.plotArrow(ax, arrowStart, arrowEnd,
scaleHead=scaleArrowHead)
ax.text(
arrowStart[0] - 0.08, arrowStart[1] + 0.15,
r"$y^{{({})}}_{{\*l,\*i}} = 0,$".format(0),
ha="right", va="center")
ax.text(
arrowStart[0] - 0.08, arrowStart[1] - 0.15,
(r"$r^{{({})}}(\gp{{\*l,\*i}}) = "
r"\objfun(\gp{{\*l,\*i}})$").format(0),
ha="right", va="center")
elif q == len(L) - 1:
arrowStart = (xOffset + subspaceSize + arrowMargin,
yOffset + subspaceSize / 2)
arrowEnd = (arrowStart[0] + startEndArrowLength, arrowStart[1])
helper.plot.plotArrow(ax, arrowStart, arrowEnd,
scaleHead=scaleArrowHead)
ax.text(
arrowEnd[0] + 0.08, arrowEnd[1] + 0.15,
r"$y^{{({})}}_{{\*l,\*i}} = "
r"\surplus{{\*l,\*i}},$".format(q+1),
ha="left", va="center")
ax.text(
arrowEnd[0] + 0.08, arrowEnd[1] - 0.15,
r"$r^{{({})}}(\gp{{\*l,\*i}}) = 0$".format(q+1),
ha="left", va="center")
if q < len(L) - 1:
t = np.linspace(-np.pi/2, 0, 200)
r = subspaceSize / 2 + subspaceMargin - arrowMargin
center = (xOffset + subspaceSize + arrowMargin,
yOffset + subspaceSize + subspaceMargin - arrowMargin)
#swap = (q == len(L) - 2)
swap = False
if swap: center, t = center[::-1], np.pi/2 - t
circle = lambda t: (center[0] + r * np.cos(t),
center[1] + r * np.sin(t))
helper.plot.plotArrowPolygon(ax, *circle(t), "k-",
scaleHead=scaleArrowHead)
ax.text(
*circle(-np.pi/4 + (np.pi if swap else 0)),
r"\contour{{white}}{{$y^{{({})}}_{{\*l,\*i}},\, "
r"r^{{({})}}(\gp{{\*l,\*i}})$}}".format(q+1, q+1),
ha=("right" if swap else "left"),
va=("bottom" if swap else "top"))
#plotSG(n, d, b, ax, (0, yOffsetGlobal - sgSize), sgSize)
ax.set_aspect("equal")
ax.set_xlim([-1.1*subspaceSize, xOffsetGlobal + schemeSize + subspaceSize])
ax.set_ylim([0, yOffsetGlobal])
ax.set_axis_off()
fig.save()
if __name__ == "__main__":
main()
| 34.37037
| 77
| 0.507902
|
15d3252b739a8e33bdcbeb67420455a441ce46ab
| 1,121
|
py
|
Python
|
data/p4VQE/R1/benchmark/startPyquil101.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startPyquil101.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startPyquil101.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += CZ(3,2) # number=8
prog += H(2) # number=9
prog += Z(3) # number=5
prog += H(2) # number=3
prog += H(3) # number=4
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil101.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 22.42
| 64
| 0.603925
|
c22cdf7b0450254198c06a58e33cabca338fbc47
| 362
|
py
|
Python
|
top/api/rest/WdtDictLogisticsQueryRequest.py
|
SAMZONG/taobao-sdk-python3
|
202a9df2085229838541713bd24433a90d07c7fc
|
[
"MIT"
] | null | null | null |
top/api/rest/WdtDictLogisticsQueryRequest.py
|
SAMZONG/taobao-sdk-python3
|
202a9df2085229838541713bd24433a90d07c7fc
|
[
"MIT"
] | null | null | null |
top/api/rest/WdtDictLogisticsQueryRequest.py
|
SAMZONG/taobao-sdk-python3
|
202a9df2085229838541713bd24433a90d07c7fc
|
[
"MIT"
] | null | null | null |
'''
Created by auto_sdk on 2020.06.01
'''
from top.api.base import RestApi
class WdtDictLogisticsQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.page_no = None
self.page_size = None
self.sid = None
def getapiname(self):
return 'hu3cgwt0tc.wdt.dict.logistics.query'
| 25.857143
| 56
| 0.720994
|
e7f4ca5d787fd9d08f48924e7e5eb8301f619790
| 407
|
py
|
Python
|
dashboard/context_processors.py
|
yuandrew/my-learning-analytics
|
9d15d54799ea29d07d04c6dde6e733ec17fb1412
|
[
"Apache-2.0"
] | null | null | null |
dashboard/context_processors.py
|
yuandrew/my-learning-analytics
|
9d15d54799ea29d07d04c6dde6e733ec17fb1412
|
[
"Apache-2.0"
] | null | null | null |
dashboard/context_processors.py
|
yuandrew/my-learning-analytics
|
9d15d54799ea29d07d04c6dde6e733ec17fb1412
|
[
"Apache-2.0"
] | 1
|
2020-11-16T19:59:02.000Z
|
2020-11-16T19:59:02.000Z
|
from dashboard.common import db_util
from dashboard.common import utils
import logging
logger = logging.getLogger(__name__)
def get_git_version_info(request):
return {'git_version': utils.get_git_version_info()}
def get_myla_globals(request):
return {'myla_globals': utils.get_myla_globals(request.user)}
def last_updated(request):
return {'last_updated': db_util.get_canvas_data_date()}
| 22.611111
| 65
| 0.786241
|
78b44e0c86e31505ad8f2a50b738ae4e13ee564d
| 3,081
|
py
|
Python
|
shaka/tools/gen_eme_plugins.py
|
Mousmi122767/shaka-player-embedded
|
10d9b5e0ec737c714c7d40c62593b9fae8514a36
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 185
|
2018-11-06T06:04:44.000Z
|
2022-03-02T22:20:39.000Z
|
shaka/tools/gen_eme_plugins.py
|
Mousmi122767/shaka-player-embedded
|
10d9b5e0ec737c714c7d40c62593b9fae8514a36
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 211
|
2018-11-15T22:52:49.000Z
|
2022-03-02T18:46:20.000Z
|
shaka/tools/gen_eme_plugins.py
|
Mousmi122767/shaka-player-embedded
|
10d9b5e0ec737c714c7d40c62593b9fae8514a36
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 52
|
2018-12-12T11:00:46.000Z
|
2022-02-23T17:35:02.000Z
|
#!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a .cc file that registers the default EME implementations.
This defines the following function:
void shaka::RegisterDefaultKeySystems();
"""
from __future__ import print_function
import argparse
import json
import os
import sys
import embed_utils
def _GetHeaders(plugins):
"""Returns a set of headers from the given plugins."""
headers = set()
for plugin in plugins:
headers.update(i['header'] for i in plugin['implementations'])
return headers
def _ParsePlugin(file_path):
"""Reads the given file and parses it into an object."""
with open(file_path, 'r') as f:
return json.load(f)
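# For illustration only: judging from the accesses in _GetHeaders and GenerateFile, a
# plugin JSON file is assumed to look roughly like the following (all values are
# placeholders, only the key names are taken from this script):
#   {
#     "implementations": [
#       {
#         "header": "path/to/factory.h",
#         "key_system": "com.example.keysystem",
#         "factory_type": "example::ExampleFactory"
#       }
#     ]
#   }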
def GenerateFile(plugins, output):
"""Generates a C++ file which registers the given implementations."""
writer = embed_utils.CodeWriter(output)
writer.Write('#include <atomic>')
writer.Write()
writer.Write('#include "shaka/eme/implementation_registry.h"')
writer.Write()
for header in sorted(_GetHeaders(plugins)):
writer.Write('#include "%s"', header)
writer.Write()
with writer.Namespace('shaka'):
writer.Write('void RegisterDefaultKeySystems();')
writer.Write()
with writer.Block('void RegisterDefaultKeySystems()'):
# This ensures the key systems are registered exactly once, even if this
# is called from multiple threads. The compare_exchange_strong will
# atomically check if it is false and replace with true on only one
# thread.
writer.Write('static std::atomic<bool> called{false};')
writer.Write('bool expected = false;')
with writer.Block('if (called.compare_exchange_strong(expected, true))'):
for plugin in plugins:
for impl in plugin['implementations']:
writer.Write('eme::ImplementationRegistry::AddImplementation(')
writer.Write(' "%s",', impl['key_system'])
writer.Write(
' std::shared_ptr<eme::ImplementationFactory>{new %s});',
impl['factory_type'])
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output', dest='output',
help='The filename to output to')
parser.add_argument('files', nargs='+',
help='The JSON files that define the implementations')
ns = parser.parse_args(args)
plugins = list(map(_ParsePlugin, ns.files))  # materialize; GenerateFile iterates plugins twice
with open(ns.output, 'w') as output:
GenerateFile(plugins, output)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 31.438776
| 79
| 0.694904
|
fe5dac154cd9168308320b7e5a3978a37158ce41
| 942
|
py
|
Python
|
7_Greedy/2.py
|
allenalvin333/Hackerrank_Interview
|
b40981ef55d04fb14d81a6e1c9ade1878f59394d
|
[
"MIT"
] | 2
|
2021-11-21T07:59:17.000Z
|
2021-11-25T13:41:50.000Z
|
7_Greedy/2.py
|
allenalvin333/Hackerrank_Interview
|
b40981ef55d04fb14d81a6e1c9ade1878f59394d
|
[
"MIT"
] | null | null | null |
7_Greedy/2.py
|
allenalvin333/Hackerrank_Interview
|
b40981ef55d04fb14d81a6e1c9ade1878f59394d
|
[
"MIT"
] | 1
|
2021-09-20T16:20:07.000Z
|
2021-09-20T16:20:07.000Z
|
# https://www.hackerrank.com/challenges/luck-balance/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'luckBalance' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. 2D_INTEGER_ARRAY contests
#
def luckBalance(k, contests):
contests.sort(reverse=True)
ans = 0
for c in contests:
if (c[1]==0): ans+=c[0]
elif(k>0): ans,k=ans+c[0],k-1
else: ans-=c[0]
return ans
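# Worked example (not part of the HackerRank harness): with k=2 and
# contests=[[5,1],[1,1],[4,0]], both important contests can be lost (+5, +1) and the
# unimportant one always adds its luck (+4), so luckBalance(2, [[5,1],[1,1],[4,0]]) == 10.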
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
contests = []
for _ in range(n):
contests.append(list(map(int, input().rstrip().split())))
result = luckBalance(k, contests)
fptr.write(str(result) + '\n')
fptr.close()
| 20.042553
| 65
| 0.643312
|
f45d087b9a904001bc86e27324c7095eb6b4082b
| 4,668
|
py
|
Python
|
settings.py
|
tsuyukimakoto/django-every_resources
|
caf6fd55c9d1f1a7a611bbbe740d1ee59ecdd14c
|
[
"BSD-3-Clause"
] | null | null | null |
settings.py
|
tsuyukimakoto/django-every_resources
|
caf6fd55c9d1f1a7a611bbbe740d1ee59ecdd14c
|
[
"BSD-3-Clause"
] | null | null | null |
settings.py
|
tsuyukimakoto/django-every_resources
|
caf6fd55c9d1f1a7a611bbbe740d1ee59ecdd14c
|
[
"BSD-3-Clause"
] | null | null | null |
# Django settings for every_resources project.
from django.conf import global_settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
import os
BASE_DIR = os.path.dirname(__file__)
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'tsuyukimakoto' # Or path to database file if using sqlite3.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'test.data' # Or path to database file if using sqlite3.
DATABASE_USER = 'tsuyukimakoto' # Not used with sqlite3.
DATABASE_PASSWORD = 'tsuyukimakoto' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Japan'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'ja'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'everes_theme_default/templates/static/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '7(akr)kbm!7(+994q+kv2gh+9&5q%c5m%*0!lhct*1n_(!w'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
#'everes_core.utils.DBDebugMiddleware',
)
ROOT_URLCONF = 'django-every_resources.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.markup',
'django.contrib.flatpages',
'django.contrib.admindocs',
'everes_core',
'everes_functional_workflow',
'everes_functional_feed',
'everes_blog',
'everes_event',
'everes_diigo',
'everes_magnolia',
'everes_photo',
'everes_project',
'everes_note',
'everes_release',
'everes_theme_sea',
'everes_theme_default',
)
AUTH_PROFILE_MODULE = 'everes_core.cmsUserprofile'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'everes_core.context_processors.site',
'everes_core.context_processors.api_keys',
'everes_core.context_processors.everes_apps',
'everes_core.context_processors.everes_tags',
'everes_core.context_processors.everes_days',
'everes_core.context_processors.everes_root_template',
)
GOOGLE_API_KEY = 'ABQIAAAAqqFWuvyrfIOifreBS0pD6BQCULP4XOMyhPd8d_NrQQEO8sT8XBTc4moQjlQsvb09T-mbSfECKD_dzQ'
GOOGLE_ANALYTICS = None
GOOGLE_SEARCH = None
GOOGLE_ADSENSE = None
GOOGLE_AD_SLOT = None
RESTRUCTUREDTEXT_FILTER_SETTINGS = {
'doctitle_xform': False,
}
PAGENT_BY = 10
USE_WORKFLOW = 'everes_functional_workflow' in INSTALLED_APPS or False
# minutes that limit user feedback.
FEEDBACK_UUID_TIMEOUT = 5
#### photo settings ####
ADDITIONAL_IMAGES = (('thumb', (128,128)), ('listing', (320, 80)), )
| 33.826087
| 108
| 0.733719
|
cc45d5bfea7f03cde8a99b71dbdd95a44eb2ff8d
| 129
|
py
|
Python
|
kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/__about__.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | null | null | null |
kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/__about__.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | null | null | null |
kube_apiserver_metrics/datadog_checks/kube_apiserver_metrics/__about__.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T13:35:17.000Z
|
2019-12-23T13:35:17.000Z
|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '1.2.1'
| 25.8
| 59
| 0.713178
|
c2588bca0d9324f8b466768102800c7402f5e818
| 1,515
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_add_new_contact.py
|
AshayNeema/gaia
|
01a19813b5577b987335becf6b15734e4fe4057d
|
[
"Apache-2.0"
] | 1
|
2015-03-19T19:15:33.000Z
|
2015-03-19T19:15:33.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_add_new_contact.py
|
AmyYLee/gaia
|
a5dbae8235163d7f985bdeb7d649268f02749a8b
|
[
"Apache-2.0"
] | 1
|
2021-11-01T18:29:16.000Z
|
2021-11-01T18:29:16.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/contacts/test_add_new_contact.py
|
AmyYLee/gaia
|
a5dbae8235163d7f985bdeb7d649268f02749a8b
|
[
"Apache-2.0"
] | 1
|
2021-08-20T11:26:36.000Z
|
2021-08-20T11:26:36.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.mocks.mock_contact import MockContact
from gaiatest.apps.contacts.app import Contacts
class TestContacts(GaiaTestCase):
def test_add_new_contact(self):
# https://moztrap.mozilla.org/manage/case/1309/
self.contact = MockContact()
contacts_app = Contacts(self.marionette)
contacts_app.launch()
new_contact_form = contacts_app.tap_new_contact()
# Enter data into fields
new_contact_form.type_given_name(self.contact['givenName'])
new_contact_form.type_family_name(self.contact['familyName'])
new_contact_form.type_phone(self.contact['tel'][0]['value'])
new_contact_form.type_email(self.contact['email'][0]['value'])
new_contact_form.type_street(self.contact['adr'][0]['streetAddress'])
new_contact_form.type_zip_code(self.contact['adr'][0]['postalCode'])
new_contact_form.type_city(self.contact['adr'][0]['locality'])
new_contact_form.type_country(self.contact['adr'][0]['countryName'])
new_contact_form.type_comment(self.contact['note'])
new_contact_form.tap_done()
self.wait_for_condition(lambda m: len(contacts_app.contacts) == 1)
self.assertEqual(contacts_app.contacts[0].name, self.contact['givenName'][0])
| 39.868421
| 85
| 0.712871
|
3a7a62be42a861e1091ca6658c337f36b05ae358
| 10,414
|
py
|
Python
|
src/finite_arm/agent_finite.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | null | null | null |
src/finite_arm/agent_finite.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | null | null | null |
src/finite_arm/agent_finite.py
|
AbhinavGopal/ts_tutorial
|
147ff28dc507172774693f225071f8e244e5994e
|
[
"MIT"
] | null | null | null |
"""Finite bandit agents."""
import numpy as np
import random as rd
from scipy.stats import beta
from base.agent import Agent
from base.agent import random_argmax
_SMALL_NUMBER = 1e-10
##############################################################################
class FiniteBernoulliBanditEpsilonGreedy(Agent):
"""Simple agent made for finite armed bandit problems."""
def __init__(self, n_arm, a0=1, b0=1, epsilon=0.0):
self.n_arm = n_arm
self.epsilon = epsilon
self.prior_success = np.array([a0 for arm in range(n_arm)])
self.prior_failure = np.array([b0 for arm in range(n_arm)])
def set_prior(self, prior_success, prior_failure):
# Overwrite the default prior
self.prior_success = np.array(prior_success)
self.prior_failure = np.array(prior_failure)
def get_posterior_mean(self):
return self.prior_success / (self.prior_success + self.prior_failure)
def get_posterior_sample(self):
return np.random.beta(self.prior_success, self.prior_failure)
def update_observation(self, observation, action, reward):
# Naive error checking for compatibility with environment
assert observation == self.n_arm
if np.isclose(reward, 1):
self.prior_success[action] += 1
elif np.isclose(reward, 0):
self.prior_failure[action] += 1
else:
raise ValueError('Rewards should be 0 or 1 in Bernoulli Bandit')
def pick_action(self, observation):
"""Take random action prob epsilon, else be greedy."""
if np.random.rand() < self.epsilon:
action = np.random.randint(self.n_arm)
else:
posterior_means = self.get_posterior_mean()
action = random_argmax(posterior_means)
return action
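# A minimal usage sketch (synthetic Bernoulli rewards, for illustration only):
#   agent = FiniteBernoulliBanditEpsilonGreedy(n_arm=3, epsilon=0.1)
#   for t in range(100):
#     action = agent.pick_action(observation=3)   # observation is just n_arm here
#     reward = float(np.random.rand() < 0.5)      # stand-in for the environment
#     agent.update_observation(observation=3, action=action, reward=reward)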
##############################################################################
class FiniteBernoulliBanditTS(FiniteBernoulliBanditEpsilonGreedy):
"""Thompson sampling on finite armed bandit."""
def __init__(self, n_arm, a0=1, b0=1, epsilon=0.0):
super().__init__(n_arm, a0=a0, b0=b0, epsilon=epsilon)
self.count = 0
def pick_action(self, observation):
"""Thompson sampling with Beta posterior for action selection."""
# self.count += 1
# print('TS Count:', self.count)
sampled_means = self.get_posterior_sample()
action = random_argmax(sampled_means)
return action
##############################################################################
class FiniteBernoulliBanditUCB(FiniteBernoulliBanditEpsilonGreedy):
def __init__(self, n_arm, a0=1, b0=1, epsilon=0.0):
super().__init__(n_arm, a0=a0, b0=b0, epsilon=epsilon)
self.c = 3
self.count = 0
def pick_action(self, observation):
# Pick the best one with consideration of upper confidence bounds.
self.count += 1
# print("Count:", self.count)
i = max(
range(self.n_arm),
key=lambda x: self.prior_success[x] / float(self.prior_success[x] + self.prior_failure[x]) + beta.std(
self.prior_success[x], self.prior_failure[x]) * self.c
)
return i
##############################################################################
class FiniteBernoulliBanditIDS(FiniteBernoulliBanditEpsilonGreedy):
"""IDS"""
def __init__(self, n_arm, a0=1, b0=1, epsilon=0.0):
super().__init__(n_arm, a0=a0, b0=b0, epsilon=epsilon)
self.optimal_arm = None
self.flag = False
self.threshold = 0.99
def init_prior(self, a0=1, a1=1):
"""
Init Beta prior
:param a0: int, multiplicative factor for alpha
:param a1: int, multiplicative factor for beta
:return: np.arrays, prior values (alpha, beta) for each earm
"""
beta1 = a0 * np.ones(self.n_arm).astype(int)
beta2 = a1 * np.ones(self.n_arm).astype(int)
return beta1, beta2
def pick_action(self, observation, M=10000, VIDS=False):
beta1, beta2 = self.init_prior()
Maap, p_a = np.zeros((self.n_arm, self.n_arm)), np.zeros(self.n_arm)
thetas = np.array([np.random.beta(beta1[arm], beta2[arm], M) for arm in range(self.n_arm)])
if not self.flag:
if np.max(p_a) >= self.threshold:
# Stop learning policy
self.flag = True
arm = self.optimal_arm
else:
arm, p_a = self.computeIDS(Maap, p_a, thetas, M, VIDS)
else:
arm = self.optimal_arm
return arm
def computeIDS(self, Maap, p_a, thetas, M, VIDS=False):
mu, theta_hat = np.mean(thetas, axis=1), np.argmax(thetas, axis=0)
for a in range(self.n_arm):
mu[a] = np.mean(thetas[a])
for ap in range(self.n_arm):
t = thetas[ap, np.where(theta_hat == a)]
Maap[ap, a] = np.nan_to_num(np.mean(t))
if ap == a:
p_a[a] = t.shape[1]/M
if np.max(p_a) >= self.threshold:
# Stop learning policy
self.optimal_arm = np.argmax(p_a)
arm = self.optimal_arm
else:
rho_star = sum([p_a[a] * Maap[a, a] for a in range(self.n_arm)])
delta = rho_star - mu
if VIDS:
v = np.array([sum([p_a[ap] * (Maap[a, ap] - mu[a]) ** 2 for ap in range(self.n_arm)])
for a in range(self.n_arm)])
arm = self.rd_argmax(-delta ** 2 / v)
else:
g = np.array([sum([p_a[ap] * (Maap[a, ap] * np.log(Maap[a, ap]/mu[a]+1e-10) +
(1-Maap[a, ap]) * np.log((1-Maap[a, ap])/(1-mu[a])+1e-10))
for ap in range(self.n_arm)]) for a in range(self.n_arm)])
arm = self.IDSAction(delta, g)
return arm, p_a
def IDSAction(self, delta, g):
Q = np.zeros((self.n_arm, self.n_arm))
IR = np.ones((self.n_arm, self.n_arm)) * np.inf
q = np.linspace(0, 1, 1000)
for a in range(self.n_arm - 1):
for ap in range(a + 1, self.n_arm):
if g[a] < 1e-6 or g[ap] < 1e-6:
return self.rd_argmax(-g)
da, dap, ga, gap = delta[a], delta[ap], g[a], g[ap]
qaap = q[self.rd_argmax(-(q * da + (1 - q) * dap) ** 2 / (q * ga + (1 - q) * gap))]
IR[a, ap] = (qaap * (da - dap) + dap) ** 2 / (qaap * (ga - gap) + gap)
Q[a, ap] = qaap
amin = self.rd_argmax(-IR.reshape(self.n_arm * self.n_arm))
a, ap = amin // self.n_arm, amin % self.n_arm
b = np.random.binomial(1, Q[a, ap])
arm = int(b * a + (1 - b) * ap)
return arm
def rd_argmax(self, vector):
m = np.amax(vector)
indices = np.nonzero(vector == m)[0]
return rd.choice(indices)
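# Note: IDSAction implements the information-directed sampling rule of minimising the
# information ratio delta(a)^2 / g(a) (estimated regret squared over estimated
# information gain), including the two-point randomisation found by the grid search
# over q in [0, 1] above.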
##############################################################################
class FiniteBernoulliBanditBootstrap(FiniteBernoulliBanditTS):
"""Bootstrapped Thompson sampling on finite armed bandit."""
def get_posterior_sample(self):
"""Use bootstrap resampling instead of posterior sample."""
total_tries = self.prior_success + self.prior_failure
prob_success = self.prior_success / total_tries
boot_sample = np.random.binomial(total_tries, prob_success) / total_tries
return boot_sample
##############################################################################
class FiniteBernoulliBanditLaplace(FiniteBernoulliBanditTS):
"""Laplace Thompson sampling on finite armed bandit."""
def get_posterior_sample(self):
"""Gaussian approximation to posterior density (match moments)."""
(a, b) = (self.prior_success + 1e-6 - 1, self.prior_failure + 1e-6 - 1)
# The modes are not well defined unless alpha, beta > 1
assert np.all(a > 0)
assert np.all(b > 0)
mode = a / (a + b)
hessian = a / mode + b / (1 - mode)
laplace_sample = mode + np.sqrt(1 / hessian) * np.random.randn(self.n_arm)
return laplace_sample
##############################################################################
class DriftingFiniteBernoulliBanditTS(FiniteBernoulliBanditTS):
"""Thompson sampling on finite armed bandit."""
def __init__(self, n_arm, a0=1, b0=1, gamma=0.01):
self.n_arm = n_arm
self.a0 = a0
self.b0 = b0
self.prior_success = np.array([a0 for arm in range(n_arm)])
self.prior_failure = np.array([b0 for arm in range(n_arm)])
self.gamma = gamma
def update_observation(self, observation, action, reward):
# Naive error checking for compatibility with environment
assert observation == self.n_arm
# All values decay slightly, observation updated
self.prior_success = self.prior_success * (
1 - self.gamma) + self.a0 * self.gamma
self.prior_failure = self.prior_failure * (
1 - self.gamma) + self.b0 * self.gamma
self.prior_success[action] += reward
self.prior_failure[action] += 1 - reward
##############################################################################
class FiniteBernoulliBanditLangevin(FiniteBernoulliBanditTS):
'''Langevin method for approximate posterior sampling.'''
def __init__(self,n_arm, step_count=100,step_size=0.01,a0=1, b0=1, epsilon=0.0):
FiniteBernoulliBanditTS.__init__(self,n_arm, a0, b0, epsilon)
self.step_count = step_count
self.step_size = step_size
def project(self,x):
'''projects the vector x onto [_SMALL_NUMBER,1-_SMALL_NUMBER] to prevent
numerical overflow.'''
return np.minimum(1-_SMALL_NUMBER,np.maximum(x,_SMALL_NUMBER))
def compute_gradient(self,x):
grad = (self.prior_success-1)/x - (self.prior_failure-1)/(1-x)
return grad
def compute_preconditioners(self,x):
second_derivatives = (self.prior_success-1)/(x**2) + (self.prior_failure-1)/((1-x)**2)
second_derivatives = np.maximum(second_derivatives,_SMALL_NUMBER)
preconditioner = np.diag(1/second_derivatives)
preconditioner_sqrt = np.diag(1/np.sqrt(second_derivatives))
return preconditioner,preconditioner_sqrt
def get_posterior_sample(self):
(a, b) = (self.prior_success + 1e-6 - 1, self.prior_failure + 1e-6 - 1)
# The modes are not well defined unless alpha, beta > 1
assert np.all(a > 0)
assert np.all(b > 0)
x_map = a / (a + b)
x_map = self.project(x_map)
preconditioner, preconditioner_sqrt=self.compute_preconditioners(x_map)
x = x_map
for i in range(self.step_count):
g = self.compute_gradient(x)
scaled_grad = preconditioner.dot(g)
scaled_noise= preconditioner_sqrt.dot(np.random.randn(self.n_arm))
x = x + self.step_size*scaled_grad + np.sqrt(2*self.step_size)*scaled_noise
x = self.project(x)
return x
| 36.669014
| 110
| 0.603995
|
14084a289a59980fa8a35e9cc1af0ad4f4acae97
| 205
|
py
|
Python
|
example/tests/integration_test.py
|
stickperson/test-finder
|
d26ae56308d720deb859480200c53aedceb07b3d
|
[
"MIT"
] | null | null | null |
example/tests/integration_test.py
|
stickperson/test-finder
|
d26ae56308d720deb859480200c53aedceb07b3d
|
[
"MIT"
] | null | null | null |
example/tests/integration_test.py
|
stickperson/test-finder
|
d26ae56308d720deb859480200c53aedceb07b3d
|
[
"MIT"
] | null | null | null |
import unittest
from example.code.thing import MyClass
class MyClassTestCase(unittest.TestCase):
def test_my_class(self):
my_class = MyClass()
self.assertEqual(my_class.true(), True)
| 22.777778
| 47
| 0.726829
|
0214b1418689909b151e8886adb34dc3bbef66b6
| 4,689
|
py
|
Python
|
fsb5/vorbis_headers2.py
|
aspadm/DEMD-database-old
|
6bc3407f5260b9c85dc050de676b2d07723f4632
|
[
"MIT"
] | 3
|
2020-02-23T04:22:06.000Z
|
2021-06-02T22:24:36.000Z
|
fsb5/vorbis_headers2.py
|
aspadm/DEMD-database
|
6bc3407f5260b9c85dc050de676b2d07723f4632
|
[
"MIT"
] | null | null | null |
fsb5/vorbis_headers2.py
|
aspadm/DEMD-database
|
6bc3407f5260b9c85dc050de676b2d07723f4632
|
[
"MIT"
] | 1
|
2020-07-10T23:11:29.000Z
|
2020-07-10T23:11:29.000Z
|
lookup = {
2959665: (2, 2, 48000),
43260314: (9, 2, 32000),
84231274: (5, 2, 32000),
118203318: (32, 1, 32000),
138890043: (27, 1, 44100),
145177876: (3, 1, 48000),
158483422: (9, 1, 11000),
177207038: (100, 2, 24000),
252678865: (1, 2, 44100),
325328602: (12, 2, 16000),
348001315: (28, 2, 32000),
351620542: (5, 1, 8000),
380530178: (100, 1, 24000),
391247061: (45, 1, 44100),
400623349: (32, 2, 32000),
465229062: (54, 2, 32000),
470329087: (45, 1, 32000),
528504586: (29, 1, 32000),
534748700: (11, 2, 44100),
540843937: (24, 2, 44100),
542711418: (63, 2, 48000),
658483745: (18, 2, 32000),
685149428: (15, 1, 24000),
686818183: (27, 2, 44100),
697353884: (6, 2, 48000),
701843367: (55, 1, 48000),
732013636: (51, 1, 48000),
797824567: (12, 1, 48000),
821010755: (23, 2, 48000),
894604746: (37, 1, 48000),
900231016: (28, 1, 32000),
912302853: (41, 2, 44100),
927462883: (54, 1, 48000),
950688206: (41, 2, 48000),
977525705: (14, 2, 22050),
1005931288: (45, 2, 48000),
1009642215: (100, 2, 11000),
1038446679: (6, 2, 44100),
1065107905: (36, 2, 48000),
1085414736: (9, 2, 44100),
1105346720: (44, 1, 44100),
1266232237: (54, 2, 24000),
1270016207: (30, 2, 44100),
1281683648: (39, 2, 48000),
1285833030: (14, 2, 48000),
1332976685: (27, 1, 32000),
1345740640: (3, 2, 24000),
1422371739: (3, 2, 48000),
1433931663: (30, 2, 32000),
1436573739: (13, 2, 24000),
1458089225: (90, 1, 48000),
1461483860: (72, 1, 48000),
1560547591: (41, 1, 44100),
1603895032: (1, 1, 8000),
1643397526: (49, 1, 44100),
1643912704: (9, 1, 32000),
1653142299: (12, 2, 44100),
1663770540: (27, 1, 48000),
1746251808: (3, 1, 24000),
1762515115: (23, 2, 32000),
1762652925: (2, 2, 44100),
1768600017: (3, 2, 8000),
1768710121: (37, 2, 32000),
1777296130: (90, 2, 48000),
1789727676: (54, 1, 32000),
1795288875: (53, 2, 48000),
1804123474: (55, 1, 44100),
1807333418: (5, 2, 8000),
1820829487: (55, 2, 44100),
1830608784: (51, 2, 44100),
1832501054: (100, 2, 48000),
1850991779: (29, 2, 48000),
1884272766: (19, 1, 32000),
1922611666: (9, 1, 44100),
1992774240: (15, 2, 44100),
2016338926: (45, 1, 48000),
2018650477: (8, 2, 32000),
2022171652: (9, 2, 11000),
2037138018: (43, 2, 48000),
2037639952: (36, 1, 32000),
2067692424: (6, 1, 48000),
2074838390: (21, 2, 32000),
2089645336: (45, 2, 44100),
2098335793: (51, 1, 44100),
2104318331: (49, 2, 44100),
2112178363: (33, 2, 48000),
2143468527: (56, 2, 48000),
2159945947: (5, 1, 44100),
2194868618: (33, 2, 44100),
2197615459: (2, 2, 32000),
2216268436: (20, 1, 44100),
2227855724: (28, 1, 48000),
2228464775: (41, 1, 48000),
2229303758: (10, 2, 48000),
2277581269: (47, 2, 48000),
2278858296: (5, 2, 48000),
2306882422: (27, 2, 32000),
2387864134: (37, 1, 32000),
2393897258: (14, 1, 22050),
2416058094: (54, 2, 44100),
2433584805: (19, 1, 48000),
2463306753: (36, 2, 32000),
2476812531: (7, 2, 44100),
2480574217: (3, 1, 32000),
2515573983: (19, 2, 32000),
2518625877: (54, 2, 48000),
2521878884: (55, 2, 48000),
2541565254: (23, 1, 32000),
2553675040: (42, 1, 48000),
2559465173: (20, 2, 44100),
2608520307: (10, 2, 24000),
2618095280: (1, 1, 48000),
2659850884: (18, 1, 48000),
2666324792: (6, 2, 24000),
2672229404: (34, 2, 32000),
2737885923: (2, 1, 44100),
2767611644: (81, 2, 48000),
2794300776: (51, 2, 48000),
2873171150: (6, 2, 32000),
2903626637: (27, 2, 48000),
2908802410: (18, 1, 32000),
2934069518: (36, 1, 48000),
2939054206: (45, 2, 32000),
2953683751: (42, 2, 48000),
2967837743: (54, 1, 24000),
3008541169: (39, 2, 44100),
3047286250: (10, 1, 24000),
3056261343: (100, 1, 48000),
3065218699: (42, 2, 44100),
3072374402: (36, 1, 44100),
3122448222: (4, 2, 44100),
3196249009: (72, 2, 48000),
3200735724: (40, 2, 44100),
3226075376: (36, 2, 44100),
3270152575: (46, 2, 44100),
3272938022: (28, 2, 44100),
3301116457: (37, 2, 48000),
3311335958: (81, 1, 48000),
3403311680: (18, 1, 44100),
3411932751: (18, 2, 48000),
3420849028: (15, 2, 24000),
3500774527: (9, 1, 48000),
3518960095: (25, 2, 32000),
3591795145: (3, 2, 32000),
3605052372: (63, 1, 48000),
3626110227: (32, 1, 44100),
3692865394: (9, 2, 48000),
3705160380: (7, 2, 11000),
3720258178: (14, 2, 24000),
3722227180: (54, 1, 44100),
3725282119: (1, 2, 8000),
3762538901: (7, 1, 32000),
3773977122: (11, 1, 44100),
3797477140: (100, 1, 11000),
3874758218: (28, 2, 48000),
4009117172: (5, 2, 44100),
4047431017: (18, 2, 44100),
4060755313: (29, 1, 44100),
4067957314: (50, 2, 48000),
4079447623: (8, 1, 44100),
4080492847: (56, 1, 48000),
4096430065: (44, 2, 44100),
4202951487: (7, 2, 48000),
4274902700: (14, 1, 24000)
}
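# The meaning of the tuple values is not documented in this module; judging by the value
# ranges they appear to be (quality, channels, sample_rate), but treat that reading as
# an assumption.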
| 28.077844
| 29
| 0.61399
|
dd004d4de09f1ae229a3639e11a79d4da9ab34c0
| 9,344
|
py
|
Python
|
testplan/testing/multitest/driver/fix/client.py
|
apretori-tic/testplan
|
d3926d328e46c88d12a9c2568d4918f8aa44f26e
|
[
"Apache-2.0"
] | null | null | null |
testplan/testing/multitest/driver/fix/client.py
|
apretori-tic/testplan
|
d3926d328e46c88d12a9c2568d4918f8aa44f26e
|
[
"Apache-2.0"
] | null | null | null |
testplan/testing/multitest/driver/fix/client.py
|
apretori-tic/testplan
|
d3926d328e46c88d12a9c2568d4918f8aa44f26e
|
[
"Apache-2.0"
] | null | null | null |
"""FixClient driver classes."""
import os
import errno
import socket
from schema import Use, Or
from testplan.common.config import ConfigOption
from testplan.common.utils.context import is_context, expand
from testplan.common.utils.strings import slugify
from testplan.common.utils.sockets.fix.client import Client
from testplan.common.utils.timing import (TimeoutException,
TimeoutExceptionInfo)
from ..base import Driver, DriverConfig
class FixClientConfig(DriverConfig):
"""
Configuration object for
:py:class:`~testplan.testing.multitest.driver.fix.client.FixClient` driver.
"""
@classmethod
def get_options(cls):
return {
'msgclass': type,
'codec': object,
'host': Or(str,
lambda x: is_context(x)),
'port': Or(Use(int), lambda x: is_context(x)),
'sender': str,
'target': str,
ConfigOption('sendersub', default=None): str,
ConfigOption('interface', default=None): tuple,
ConfigOption('connect_at_start', default=True): bool,
ConfigOption('logon_at_start', default=True): bool,
ConfigOption('custom_logon_tags', default=None): object,
ConfigOption('receive_timeout', default=30):
Or(int, float),
ConfigOption('logon_timeout', default=10):
Or(int, float),
ConfigOption('logoff_timeout', default=3):
Or(int, float)
}
class FixClient(Driver):
"""
Fix client driver.
This is built on top of the
:py:class:`testplan.common.utils.sockets.fix.client.Client` class, which
provides equivalent functionality and may be used outside of MultiTest.
:param msgclass: Type used to construct logon, logoff and received FIX
messages.
:type msgclass: ``type``
:param codec: A Codec to use to encode and decode FIX messages.
:type codec: a ``Codec`` instance
:param host: Target host name. This can be a
:py:class:`~testplan.common.utils.context.ContextValue`
and will be expanded on runtime.
:type host: ``str``
:param port: Target port number. This can be a
:py:class:`~testplan.common.utils.context.ContextValue`
and will be expanded on runtime.
:type port: ``int``
:param sender: FIX SenderCompID.
:type sender: ``str``
:param target: FIX TargetCompID.
:type target: ``str``
:param sendersub: FIX SenderSubID.
:type sendersub: ``str``
:param interface: Interface to bind to.
:type interface: ``tuple``(``str, ``int``)
:param connect_at_start: Connect to server on start. Default: True
:type connect_at_start: ``bool``
:param logon_at_start: Attempt FIX logon if connected at start.
:type logon_at_start: ``bool``
:param custom_logon_tags: Custom logon tags to be merged into
the ``35=A`` message.
:type custom_logon_tags: ``FixMessage``
:param receive_timeout: Timeout in seconds while receiving from socket.
:type receive_timeout: ``int`` or ``float``
:param logon_timeout: Timeout in seconds to wait for logon response.
:type logon_timeout: ``int`` or ``float``
:param logoff_timeout: Timeout in seconds to wait for logoff response.
:type logoff_timeout: ``int`` or ``float``
Also inherits all
:py:class:`~testplan.testing.multitest.driver.base.Driver`` options.
"""
CONFIG = FixClientConfig
def __init__(self, **options):
super(FixClient, self).__init__(**options)
self._host = None
self._port = None
self._client = None
self._logname = '{0}.log'.format(slugify(self.cfg.name))
@property
def logpath(self):
"""Fix client logfile in runpath."""
return os.path.join(self.runpath, self._logname)
@property
def host(self):
"""Target host name."""
return self._host
@property
def port(self):
"""Client port number assigned."""
return self._port
@property
def sender(self):
"""Shortcut to be used inside testcases."""
return self.cfg.sender
@property
def target(self):
"""Shortcut to be used inside testcases."""
return self.cfg.target
@property
def sendersub(self):
"""Shortcut to be used inside testcases."""
return self.cfg.sendersub
def starting(self):
"""Start the FIX client and optionally connect to host/post."""
super(FixClient, self).starting()
self._setup_file_logger(self.logpath)
self.reconnect()
def connect(self):
"""
Connect client.
"""
self._client.connect()
self._host, self._port = self._client.address
def reconnect(self):
"""
Starts a stopped FixClient instance reconnecting to the original host
and port as it was originally started with.
If host and port were specified as context values they will be resolved
again at this point.
This is helpful in cases the dependent process has also restarted on a
different port.
"""
self._stop_logic()
server_host = expand(self.cfg.host, self.context)
server_port = expand(self.cfg.port, self.context, int)
self._client = Client(msgclass=self.cfg.msgclass, codec=self.cfg.codec,
host=server_host, port=server_port,
sender=self.cfg.sender, target=self.cfg.target,
sendersub=self.cfg.sendersub,
interface=self.cfg.interface,
logger=self.file_logger)
if self.cfg.connect_at_start or self.cfg.logon_at_start:
self.connect()
if self.cfg.logon_at_start:
self.logon()
def logon(self):
"""
Logon to server.
"""
self._client.sendlogon(custom_tags=self.cfg.custom_logon_tags)
rcv = self._client.receive(timeout=self.cfg.logon_timeout)
self.file_logger.debug('Received logon response {}.'.format(rcv))
if 35 not in rcv or rcv[35] != b'A':
self.file_logger.debug('Unexpected logon response.')
raise Exception('Unexpected logon response : {0}.'.format(rcv))
def logoff(self):
"""
Logoff from server.
"""
self._client.sendlogoff()
rcv = self._client.receive(timeout=self.cfg.logoff_timeout)
self.file_logger.debug('Received logoff response {}.'.format(rcv))
if 35 not in rcv or rcv[35] != b'5':
self.file_logger.debug(
'Unexpected logoff response {}'.format(rcv))
self.logger.error(
'Fixclient {}: received unexpected logoff response : {}'.format(
self.cfg.name, rcv))
def send(self, msg):
"""
Send message.
:param msg: Message to be sent.
:type msg: ``FixMessage``
:return: msg
:rtype: ``FixMessage``
"""
return self._client.send(msg)[1]
def send_tsp(self, msg):
"""
Send message.
:param msg: Message to be sent.
:type msg: ``FixMessage``
:return: Timestamp when msg sent (in microseconds from epoch) and msg.
:rtype: ``tuple`` of ``long`` and ``FixMessage``
"""
return self._client.send(msg)
def receive(self, timeout=None):
"""
Receive message.
:param timeout: Timeout in seconds.
:type timeout: ``int``
:return: received ``FixMessage`` object
:rtype: ``FixMessage``
"""
timeout = timeout if timeout is not None else self.cfg.receive_timeout
timeout_info = TimeoutExceptionInfo()
try:
received = self._client.receive(timeout=timeout)
except socket.timeout:
self.logger.error(
'Timed out waiting for message for {} seconds.'.format(
timeout))
raise TimeoutException(
'Timed out waiting for message on {0}. {1}'.format(
self.cfg.name, timeout_info.msg()))
self.file_logger.debug('Received msg {}.'.format(received))
return received
def flush(self, timeout=0):
"""
Flush all inbound messages.
:param timeout: Message receive timeout in seconds. Default: 0
:type timeout: ``int``
"""
while True:
try:
self.receive(timeout=timeout)
except TimeoutException:
break
except socket.error:
break
def _stop_logic(self):
if self._client:
try:
self.logoff()
except socket.error as err:
if err.errno != errno.EPIPE:
# Not a broken pipe
raise
self._client.close()
self._client = None
self.file_logger.debug('Stopped client')
def stopping(self):
"""Stops the FIX client."""
super(FixClient, self).stopping()
self._stop_logic()
def aborting(self):
"""Abort logic that stops the FIX client."""
self._stop_logic()
| 33.017668
| 80
| 0.592466
|
39f40870b4dba9faa2c7762d0bfc34c9879af5d9
| 572
|
py
|
Python
|
grafica/assets_path.py
|
cristobalfuenzalida/grafica
|
cf7bb90c4c5c34ee56d328188111c917a0d10389
|
[
"MIT"
] | 12
|
2021-08-17T12:57:39.000Z
|
2022-03-28T02:52:30.000Z
|
grafica/assets_path.py
|
cristobalfuenzalida/grafica
|
cf7bb90c4c5c34ee56d328188111c917a0d10389
|
[
"MIT"
] | 3
|
2021-03-18T17:32:12.000Z
|
2021-03-22T10:02:19.000Z
|
grafica/assets_path.py
|
cristobalfuenzalida/grafica
|
cf7bb90c4c5c34ee56d328188111c917a0d10389
|
[
"MIT"
] | 13
|
2021-08-17T03:23:21.000Z
|
2022-03-20T21:40:16.000Z
|
# coding=utf-8
"""Convenience functionality to access assets files"""
import os.path
__author__ = "Daniel Calderon"
__license__ = "MIT"
def getAssetPath(filename):
"""Convenience function to access assets files regardless from where you run the example script."""
thisFilePath = os.path.abspath(__file__)
thisFolderPath = os.path.dirname(thisFilePath)
parentFolderPath = os.path.dirname(thisFolderPath)
assetsDirectory = os.path.join(parentFolderPath, "assets")
requestedPath = os.path.join(assetsDirectory, filename)
return requestedPath
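# Example (hypothetical file name): getAssetPath("bricks.jpg") returns the absolute path
# "<repository root>/assets/bricks.jpg", regardless of the current working directory.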
| 30.105263
| 103
| 0.755245
|
b159b87b6f7631fde4f4fe1f71677b78824386a3
| 55,120
|
py
|
Python
|
mirage/libs/ble.py
|
byte-arts/mirage
|
ab67f234d446abe3a02796145165e16966175b51
|
[
"MIT"
] | null | null | null |
mirage/libs/ble.py
|
byte-arts/mirage
|
ab67f234d446abe3a02796145165e16966175b51
|
[
"MIT"
] | null | null | null |
mirage/libs/ble.py
|
byte-arts/mirage
|
ab67f234d446abe3a02796145165e16966175b51
|
[
"MIT"
] | 1
|
2021-06-22T09:34:01.000Z
|
2021-06-22T09:34:01.000Z
|
from scapy.all import *
from mirage.core.module import WirelessModule
from mirage.libs.ble_utils.scapy_hci_layers import *
from mirage.libs.ble_utils.packets import *
from mirage.libs.ble_utils.constants import *
from mirage.libs.bt_utils.assigned_numbers import AssignedNumbers
from mirage.libs.ble_utils.ubertooth import *
from mirage.libs.ble_utils.btlejack import *
from mirage.libs.ble_utils.nrfsniffer import *
from mirage.libs.ble_utils.adb import *
from mirage.libs.ble_utils.hcidump import *
from mirage.libs.ble_utils.pcap import *
from mirage.libs.ble_utils.helpers import *
from mirage.libs.ble_utils.crypto import *
from mirage.libs.ble_utils.scapy_link_layers import *
from mirage.libs.ble_utils.dissectors import *
from mirage.libs.ble_utils.att_server import *
from mirage.libs import wireless,bt,io
class BLEHCIDevice(bt.BtHCIDevice):
'''
This device allows to communicate with an HCI Device in order to use Bluetooth Low Energy protocol.
The corresponding interfaces are : ``hciX`` (e.g. "hci0")
The following capabilities are actually supported :
+-----------------------------------+----------------+
| Capability | Available ? |
+===================================+================+
| SCANNING | yes |
+-----------------------------------+----------------+
| ADVERTISING | yes |
+-----------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | no |
+-----------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | no |
+-----------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | no |
+-----------------------------------+----------------+
| JAMMING_CONNECTIONS | no |
+-----------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-----------------------------------+----------------+
| HIJACKING_CONNECTIONS | no |
+-----------------------------------+----------------+
| INITIATING_CONNECTION | yes |
+-----------------------------------+----------------+
| RECEIVING_CONNECTION | yes |
+-----------------------------------+----------------+
| COMMUNICATING_AS_MASTER | yes |
+-----------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | yes |
+-----------------------------------+----------------+
| HCI_MONITORING | no |
+-----------------------------------+----------------+
'''
sharedMethods = [
"getConnections",
"switchConnection",
"getCurrentConnection",
"getCurrentConnectionMode",
"getAddressByHandle",
"getCurrentHandle",
"isConnected",
"setScan",
"setAdvertising",
"setAdvertisingParameters",
"setScanningParameters",
"getAddress",
"setAddress",
"getMode",
"getAddressMode",
"getManufacturer",
"isAddressChangeable",
"encryptLink",
"updateConnectionParameters"
]
def _setCurrentHandle(self,handle,address="",mode="public"):
if handle != -1:
found = False
for connection in self.handles:
if connection["handle"] == handle:
found = True
if not found:
self.handles.append({"address":address.upper() if address is not None else "", "handle":handle, "mode":mode})
self.currentHandle = handle
def getCurrentConnectionMode(self):
'''
This method returns the connection mode ("public" or "random") of the currently established connection.
:return: connection mode of the current connection ("public" or "random")
:rtype: str
:Example:
>>> device.getCurrentConnectionMode()
'public'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
handle = self.getCurrentHandle()
for connection in self.handles:
if connection['handle'] == handle:
return connection['mode']
return None
def _initBLE(self):
self.operationMode = BLEOperationMode.NORMAL
self._enterCommandMode()
self._internalCommand(HCI_Cmd_Reset())
self._internalCommand(HCI_Cmd_Set_Event_Filter())
self._internalCommand(HCI_Cmd_Connect_Accept_Timeout())
self._internalCommand(HCI_Cmd_Set_Event_Mask())
self._internalCommand(HCI_Cmd_LE_Host_Supported())
self._exitCommandMode()
self.capabilities = ["SCANNING", "ADVERTISING", "INITIATING_CONNECTION", "RECEIVING_CONNECTION", "COMMUNICATING_AS_MASTER", "COMMUNICATING_AS_SLAVE"]
def _setOperationMode(self,value):
self.operationMode = value
def _getOperationMode(self):
return self.operationMode
def getMode(self):
'''
This method returns the current mode used by the HCI Device.
Three modes are available and indicates the current state of the device: "NORMAL", "SCANNING" and "ADVERTISING"
:return: string indicating the current mode
:rtype: str
:Example:
>>> device.getMode()
'NORMAL'
>>> device.setScan(enable=True)
>>> device.getMode()
'SCANNING'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
mode = self._getOperationMode()
if mode == BLEOperationMode.NORMAL:
return "NORMAL"
elif mode == BLEOperationMode.SCANNING:
return "SCANNING"
elif mode == BLEOperationMode.ADVERTISING:
return "ADVERTISING"
def setAddress(self,address,random=False):
'''
This method allows to modify the BD address and the BD address type of the device, if it is possible.
:param address: new BD address
:type address: str
:param random: boolean indicating if the address is random
:type random: bool
:return: boolean indicating if the operation was successful
:rtype: bool
:Example:
>>> device.setAddress("12:34:56:78:9A:BC",random=True) # set the device's address to 12:34:56:78:9A:BC (random)
[INFO] Changing HCI Device (hci0) Random Address to : 12:34:56:78:9A:BC
[SUCCESS] BD Address successfully modified !
True
>>> device.setAddress("12:34:56:78:9A:BC") # set the device's address to 12:34:56:78:9A:BC (public)
[INFO] Changing HCI Device (hci0) Address to : 12:34:56:78:9A:BC
[SUCCESS] BD Address successfully modified !
True
>>> device2.setAddress("12:34:56:78:9A:BC")
[INFO] Changing HCI Device (hci0) Address to : 12:34:56:78:9A:BC
[ERROR] The vendor has not provided a way to modify the BD Address.
False
.. warning::
Mirage uses some vendor specific HCI Commands in order to modify the public BD address. If the vendor has not provided a way to modify the BD address, it is not possible to change it (see *device2* in the example section).
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if random:
self.addressMode = "random"
self._enterCommandMode()
io.info("Changing HCI Device ("+str(self.interface)+") Random Address to : "+address)
self._internalCommand(HCI_Cmd_LE_Set_Random_Address(address=address))
io.success("BD Address successfully modified !")
self._exitCommandMode()
return True
else:
self.addressMode = "public"
rValue = super().setAddress(address)
self._setOperationMode(BLEOperationMode.NORMAL)
return rValue
def setScan(self,enable=True, passive=False):
'''
This method enables or disables the scanning mode.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:param passive: boolean indicating if the scan has to be passive (e.g. no *SCAN_REQ*)
:type passive: bool
:Example:
>>> device.setScan(enable=True, passive=True) # scanning mode enabled in passive mode
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self._enterCommandMode()
if enable and self._getOperationMode() == BLEOperationMode.NORMAL:
self._internalCommand(HCI_Cmd_LE_Set_Scan_Parameters(type=1 if not passive else 0))
self._internalCommand(HCI_Cmd_LE_Set_Scan_Enable())
self._setOperationMode(BLEOperationMode.SCANNING)
elif not enable and self._getOperationMode() == BLEOperationMode.SCANNING:
self._internalCommand(HCI_Cmd_LE_Set_Scan_Enable(enable=0))
self._setOperationMode(BLEOperationMode.NORMAL)
self._exitCommandMode()
def setAdvertising(self,enable=True):
'''
This method enables or disables the advertising mode.
:param enable: boolean indicating if the advertising mode must be enabled
:type enable: bool
:Example:
>>> device.setAdvertising(enable=True) # scanning mode enabled
>>> device.setAdvertising(enable=False) # scanning mode disabled
.. warning::
Please note that if no advertising and scanning data has been provided before this call, nothing will be advertised: set the scanning parameters and the advertising parameters before calling this method.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self._enterCommandMode()
if enable and self._getOperationMode() == BLEOperationMode.NORMAL:
self._internalCommand(HCI_Cmd_LE_Set_Advertise_Enable(enable=1))
self._setOperationMode(BLEOperationMode.ADVERTISING)
elif not enable and self._getOperationMode() == BLEOperationMode.ADVERTISING:
self._internalCommand(HCI_Cmd_LE_Set_Advertise_Enable(enable=0))
self._setOperationMode(BLEOperationMode.NORMAL)
self._exitCommandMode()
def updateConnectionParameters(self,minInterval=0, maxInterval=0, latency=0, timeout=0,minCe=0, maxCe=0xFFFF):
'''
This method allows to update connection parameters according to the data provided.
It will mainly be used if an incoming BLEConnectionParameterUpdateRequest is received.
:param minInterval: minimal interval
:type minInterval: int
:param maxInterval: maximal interval
:type maxInterval: int
:param latency: connection latency
:type latency: int
:param timeout: connection timeout
:type timeout: int
:param minCe: minimum connection event length
:type minCe: int
:param maxCe: maximum connection event length
:type maxCe: int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self._enterCommandMode()
self._internalCommand(HCI_Cmd_LE_Connection_Update(handle=self.getCurrentHandle(),min_interval=minInterval, max_interval=maxInterval,latency=latency, timeout=timeout, min_ce=minCe, max_ce=maxCe),noResponse=True)
self._exitCommandMode()
def setScanningParameters(self, data=b""):
'''
This method sets scanning parameters according to the data provided.
It will mainly be used by *SCAN_RESP* packets.
:param data: data to use in *SCAN_RESP*
:type data: bytes
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.setScan(enable=False)
self._enterCommandMode()
advData = data
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self._internalCommand(New_HCI_Cmd_LE_Set_Scan_Response_Data(data=advData,len=len(data)))
self._exitCommandMode()
def setAdvertisingParameters(self,type = "ADV_IND",destAddr = "00:00:00:00:00:00",data = b"",intervalMin = 200, intervalMax = 210, daType='public', oaType='public'):
'''
This method sets advertising parameters according to the data provided.
It will mainly be used by *ADV_IND-like* packets.
:param type: type of advertisement (*available values :* "ADV_IND", "ADV_DIRECT_IND", "ADV_SCAN_IND", "ADV_NONCONN_IND", "ADV_DIRECT_IND_LOW")
:type type: str
:param destAddr: destination address (it will be used if needed)
:type destAddr: str
:param data: data included in the payload
:type data: bytes
:param intervalMin: minimal interval
:type intervalMin: int
:param intervalMax: maximal interval
:type intervalMax: int
:param daType: string indicating the destination address type ("public" or "random")
:type daType: str
:param oaType: string indicating the origin address type ("public" or "random")
:type oaType: str
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.setAdvertising(enable=False)
self.setScan(enable=False)
self._enterCommandMode()
if type == "ADV_IND":
advType = ADV_IND
elif type == "ADV_DIRECT_IND":
advType = ADV_DIRECT_IND
elif type == "ADV_SCAN_IND":
advType = ADV_SCAN_IND
elif type == "ADV_NONCONN_IND":
advType = ADV_NONCONN_IND
elif type == "ADV_DIRECT_IND_LOW":
advType = ADV_DIRECT_IND_LOW
else:
io.fail("Advertisements type not recognized, using ADV_IND.")
advtype = ADV_IND
dAddr = None if destAddr == "00:00:00:00:00:00" else destAddr
advData = data
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self._internalCommand(HCI_Cmd_LE_Set_Advertising_Parameters(adv_type=advType, daddr=dAddr, datype=daType, oatype=oaType,interval_min=intervalMin, interval_max = intervalMax))
self._internalCommand(New_HCI_Cmd_LE_Set_Advertising_Data(data=EIR_Hdr(data)))
self._exitCommandMode()
def _setAddressMode(self,mode="public"):
self.addressMode = mode
def getAddressMode(self):
'''
This method returns the address mode currently in use.
:return: address mode ("public" or "random")
:rtype: str
:Example:
>>> device.getAddressMode()
'public'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.addressMode
def init(self):
self.initializeBluetooth = False
super().init()
if self.ready:
self.addressMode = "public"
self._initBLE()
def encryptLink(self,rand=b"\x00\x00\x00\x00\x00\x00\x00\x00", ediv=0, ltk = b"\x00"*16):
'''
This method sends an encryption request to the current connection established and encrypts the link if possible.
:param rand: random value
:type rand: bytes
:param ediv: EDIV value
:type ediv: int
:param ltk: Long Term Key
:type ltk: bytes
:return: boolean indicating if the link was successfully encrypted
:rtype: bool
:Example:
>>> device.encryptLink(ltk=bytes.fromhex("000102030405060708090a0b0c0d0e0f")) # Short Term Key, ediv = rand = 0
True
.. seealso::
It is possible to encrypt the link using directly the encryption-related packets, such as :
* ``mirage.libs.ble_utils.packets.BLELongTermKeyRequest``
* ``mirage.libs.ble_utils.packets.BLELongTermKeyRequestReply``
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self._enterCommandMode()
handle = self.getCurrentHandle()
self._internalCommand(HCI_Cmd_LE_Start_Encryption_Request(
handle=handle,
rand=rand,
ediv=ediv,
ltk=ltk
)
,noResponse=True)
encryptionChange = self.socket.recv()
while encryptionChange is None or HCI_Event_Encryption_Change not in encryptionChange:
encryptionChange = self.socket.recv()
self._exitCommandMode()
return encryptionChange.enabled
class BLEEmitter(wireless.Emitter):
'''
This class is an Emitter for the Bluetooth Low Energy protocol ("ble").
It can instantiates the following devices :
* HCI Device (``mirage.libs.ble.BLEHCIDevice``) **[ interface "hciX" (e.g. "hci0") ]**
* Ubertooth Device (``mirage.libs.ble_utils.ubertooth.BLEUbertoothDevice``) **[ interface "ubertoothX" (e.g. "ubertooth0") ]**
* BTLEJack Device (``mirage.libs.ble_utils.btlejack.BTLEJackDevice``) **[ interface "microbitX" (e.g. "microbit0") ]**
* ADB Device (``mirage.libs.ble_utils.adb.ADBDevice``) **[ interface "adbX" (e.g. "adb0") ]**
* HCIDump Device (``mirage.libs.ble_utils.hcidump.BLEHcidumpDevice``) **[ interface "hcidumpX" (e.g. "hcidump0") ]**
* PCAP Device (``mirage.libs.ble_utils.pcap.BLEPCAPDevice``) **[ interface "<file>.pcap" (e.g. "pairing.pcap") ]**
'''
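# Typical construction (the interface string selects the device class in __init__ below);
# interface names other than the defaults are placeholders:
#   emitter = BLEEmitter(interface="hci0")          # HCI adapter
#   emitter = BLEEmitter(interface="microbit0")     # BTLEJack sniffer
#   emitter = BLEEmitter(interface="capture.pcap")  # PCAP file device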
def __init__(self, interface="hci0"):
deviceClass = None
if "hcidump" in interface:
deviceClass = BLEHcidumpDevice
elif "hci" in interface:
deviceClass = BLEHCIDevice
elif "ubertooth" in interface:
deviceClass = BLEUbertoothDevice
elif "microbit" in interface:
deviceClass = BTLEJackDevice
elif "adb" in interface:
deviceClass = ADBDevice
elif "nrfsniffer" in interface:
deviceClass = NRFSnifferDevice
elif interface[-5:] == ".pcap":
deviceClass = BLEPCAPDevice
super().__init__(interface=interface, packetType=BLEPacket, deviceType=deviceClass)
def convert(self,packet):
if packet.packet is None:
if isinstance(packet,BLEEncryptedPacket):
packet.packet = BTLE() / BTLE_DATA(packet.data)
else:
# Specific sublayers
if "hci" in self.interface:
packet.packet = HCI_Hdr()
if isinstance(packet,BLEConnect):
self.device._setAddressMode(packet.initiatorType)
packet.packet /= HCI_Command_Hdr()/HCI_Cmd_LE_Create_Connection(
paddr=packet.dstAddr,
patype=packet.type,
atype=packet.initiatorType)
elif isinstance(packet,BLEConnectionCancel):
packet.packet /= HCI_Command_Hdr()/HCI_Cmd_LE_Create_Connection_Cancel()
else:
handle = (packet.connectionHandle if packet.connectionHandle != -1
else self.device.getCurrentHandle())
if isinstance(packet,BLEDisconnect):
packet.packet /= HCI_Command_Hdr()/HCI_Cmd_Disconnect(handle=handle)
elif isinstance(packet,BLELongTermKeyRequest):
packet.packet /= HCI_Command_Hdr()/HCI_Cmd_LE_Start_Encryption_Request(
handle=handle,
rand=packet.rand,
ediv=packet.ediv,
ltk=packet.ltk)
elif isinstance(packet,BLELongTermKeyRequestReply):
packet.packet /= HCI_Command_Hdr()/(
HCI_Cmd_LE_Long_Term_Key_Request_Reply(handle=handle,ltk=packet.ltk)
if packet.positive
else HCI_Cmd_LE_Long_Term_Key_Request_Negative_Reply(handle=handle))
else:
packet.packet /= HCI_ACL_Hdr(handle=handle)
else:
packet.packet = BTLE()
if isinstance(packet, BLEAdvertisement):
packet.packet /= BTLE_ADV(RxAdd=0x00 if packet.addrType == "public" else 0x01)
advType = packet.type
if advType == "ADV_IND":
packet.packet /= BTLE_ADV_IND(AdvA = packet.addr, data=packet.data)
elif advType == "ADV_DIRECT_IND":
if isinstance(packet,BLEAdvDirectInd):
initA = packet.dstAddr
else:
initA = "00:00:00:00:00:00"
packet.packet /= BTLE_ADV_DIRECT_IND(AdvA = packet.addr, InitA = initA)
elif advType == "ADV_NONCONN_IND":
packet.packet /= BTLE_ADV_NONCONN_IND()
elif advType == "ADV_SCAN_IND":
packet.packet /= BTLE_ADV_SCAN_IND()
elif advType == "SCAN_REQ":
if isinstance(packet,BLEAdvDirectInd):
scanA = packet.dstAddr
else:
scanA = "00:00:00:00:00:00"
packet.packet /= BTLE_SCAN_REQ(AdvA = packet.addr, ScanA = scanA)
elif advType == "SCAN_RSP":
packet.packet /= BTLE_SCAN_RSP(AdvA = packet.addr, data=packet.data)
elif advType == "CONNECT_REQ" or isinstance(packet,BLEConnectRequest):
packet.packet.TxAdd = 0x00 if packet.srcAddrType == "public" else 0x01
packet.packet.RxAdd = 0x00 if packet.dstAddrType == "public" else 0x01
packet.packet /= BTLE_CONNECT_REQ(
InitA=packet.srcAddr,
AdvA=packet.dstAddr,
AA=packet.accessAddress,
crc_init=packet.crcInit,
win_size=packet.winSize,
win_offset=packet.winOffset,
interval=packet.hopInterval,
latency=packet.latency,
timeout=packet.timeout,
chM =packet.channelMap,
SCA=packet.SCA,
hop=packet.hopIncrement
)
packet.packet.access_addr = 0x8e89bed6
else:
packet.packet /= BTLE_DATA()
if isinstance(packet,BLEDisconnect):
packet.packet /= ControlPDU(optcode=0x02)
elif isinstance(packet,BLEEmptyPDU):
packet.packet.LLID = 1
elif isinstance(packet,BLEControlPDU):
optcode = 0
if packet.type == "LL_CONNECTION_UPDATE_REQ":
optcode = 0x00
elif packet.type == "LL_CHANNEL_MAP_REQ":
optcode = 0x01
elif packet.type == "LL_TERMINATE_IND":
optcode = 0x02
elif packet.type == "LL_ENC_REQ":
optcode = 0x03
elif packet.type == "LL_ENC_RSP":
optcode = 0x04
elif packet.type == "LL_START_ENC_REQ":
optcode = 0x05
elif packet.type == "LL_START_ENC_RESP":
optcode = 0x06
elif packet.type == "LL_UNKNOWN_RSP":
optcode = 0x07
elif packet.type == "LL_FEATURE_REQ":
optcode = 0x08
elif packet.type == "LL_FEATURE_RSP":
optcode = 0x09
elif packet.type == "LL_PAUSE_ENC_REQ":
optcode = 0x0A
elif packet.type == "LL_PAUSE_ENC_RSP":
optcode = 0x0B
elif packet.type == "LL_VERSION_IND":
optcode = 0x0C
elif packet.type == "LL_REJECT_IND":
optcode = 0x0D
packet.packet /= ControlPDU(optcode = optcode)
if packet.data is not None and packet.data != b"":
packet.packet /= packet.data
# Common sublayers
if HCI_Command_Hdr not in packet.packet and ControlPDU not in packet.packet and BTLE_ADV not in packet.packet:
if (
isinstance(packet,BLEConnectionParameterUpdateRequest) or
isinstance(packet,BLEConnectionParameterUpdateResponse)
):
packet.packet /= L2CAP_Hdr()/L2CAP_CmdHdr(id=packet.l2capCmdId)
elif (
isinstance(packet,BLESecurityRequest) or
isinstance(packet,BLEPairingRequest) or
isinstance(packet,BLEPairingResponse) or
isinstance(packet,BLEPairingFailed) or
isinstance(packet,BLEPairingConfirm) or
isinstance(packet,BLEPairingRandom) or
isinstance(packet,BLEEncryptionInformation) or
isinstance(packet,BLEMasterIdentification) or
isinstance(packet,BLEIdentityInformation) or
isinstance(packet,BLEIdentityAddressInformation) or
isinstance(packet,BLESigningInformation)
):
packet.packet /= L2CAP_Hdr(cid=6)/SM_Hdr()
else:
packet.packet /= L2CAP_Hdr(cid=4)/ATT_Hdr()
# Common upper layers
if isinstance(packet,BLEConnectionParameterUpdateRequest):
packet.packet /= L2CAP_Connection_Parameter_Update_Request(
max_interval=packet.maxInterval,
min_interval=packet.minInterval,
slave_latency=packet.slaveLatency,
timeout_mult=packet.timeoutMult)
elif isinstance(packet,BLEConnectionParameterUpdateResponse):
packet.packet /= L2CAP_Connection_Parameter_Update_Response(move_result=packet.moveResult)
elif isinstance(packet,BLESecurityRequest):
packet.packet /= SM_Security_Request(authentication=packet.authentication)
elif isinstance(packet,BLEPairingRequest):
packet.packet /= SM_Pairing_Request(
iocap=packet.inputOutputCapability,
oob=1 if packet.outOfBand else 0,
authentication=packet.authentication,
max_key_size = packet.maxKeySize,
initiator_key_distribution=packet.initiatorKeyDistribution,
responder_key_distribution = packet.responderKeyDistribution)
elif isinstance(packet,BLEPairingResponse):
packet.packet /= SM_Pairing_Response(
iocap=packet.inputOutputCapability,
oob=1 if packet.outOfBand else 0,
authentication=packet.authentication,
max_key_size = packet.maxKeySize,
initiator_key_distribution=packet.initiatorKeyDistribution,
responder_key_distribution = packet.responderKeyDistribution)
elif isinstance(packet,BLEPairingFailed):
packet.packet /= SM_Failed(reason=packet.reason)
elif isinstance(packet,BLEPairingConfirm):
packet.packet /= SM_Confirm(confirm=packet.confirm)
elif isinstance(packet,BLEPairingRandom):
packet.packet /= SM_Random(random=packet.random)
elif isinstance(packet,BLEEncryptionInformation):
packet.packet /= SM_Encryption_Information(ltk=packet.ltk)
elif isinstance(packet,BLEMasterIdentification):
packet.packet /= SM_Master_Identification(ediv=packet.ediv, rand=packet.rand)
elif isinstance(packet,BLEIdentityInformation):
packet.packet /= SM_Identity_Information(irk=packet.irk)
elif isinstance(packet,BLEIdentityAddressInformation):
packet.packet /= SM_Identity_Address_Information(
atype=0 if packet.type=="public" else 1,
address=packet.address
)
elif isinstance(packet,BLESigningInformation):
packet.packet /= SM_Signing_Information(csrk=packet.csrk)
elif isinstance(packet, BLEFindByTypeValueRequest):
packet.packet /= ATT_Find_By_Type_Value_Request(start=packet.startHandle,
end=packet.endHandle,
uuid=packet.uuid,
data=packet.data)
elif isinstance(packet, BLEFindByTypeValueResponse):
packet.packet /= ATT_Find_By_Type_Value_Response(handles=packet.handles)
elif isinstance(packet,BLEErrorResponse):
packet.packet /= ATT_Error_Response(request=packet.request, handle=packet.handle,ecode=packet.ecode)
elif isinstance(packet,BLEExchangeMTURequest):
packet.packet /= ATT_Exchange_MTU_Request(mtu = packet.mtu)
elif isinstance(packet,BLEExchangeMTUResponse):
packet.packet /= ATT_Exchange_MTU_Response(mtu = packet.mtu)
elif isinstance(packet,BLEReadByGroupTypeRequest):
packet.packet /= ATT_Read_By_Group_Type_Request(
start=packet.startHandle,
end=packet.endHandle,
uuid=packet.uuid)
elif isinstance(packet,BLEReadByGroupTypeResponse):
packet.packet /= ATT_Read_By_Group_Type_Response(data=packet.data,length=packet.length)
elif isinstance(packet,BLEReadByTypeRequest):
packet.packet /= ATT_Read_By_Type_Request(
start=packet.startHandle,
end=packet.endHandle,
uuid=packet.uuid)
elif isinstance(packet,BLEReadByTypeResponse):
packet.packet /= ATT_Read_By_Type_Response(packet.data)
elif isinstance(packet,BLEReadBlobRequest):
packet.packet /= New_ATT_Read_Blob_Request(gatt_handle=packet.handle,offset=packet.offset)
elif isinstance(packet,BLEReadBlobResponse):
packet.packet /= New_ATT_Read_Blob_Response(value=packet.value)
elif isinstance(packet,BLEHandleValueNotification):
packet.packet /= New_ATT_Handle_Value_Notification(gatt_handle=packet.handle,value=packet.value)
elif isinstance(packet,BLEHandleValueIndication):
packet.packet /= New_ATT_Handle_Value_Indication(gatt_handle=packet.handle,value=packet.value)
elif isinstance(packet,BLEHandleValueConfirmation):
packet.packet /= New_ATT_Handle_Value_Confirmation()
elif isinstance(packet,BLEFindInformationRequest):
packet.packet /= ATT_Find_Information_Request(start=packet.startHandle,end = packet.endHandle)
elif isinstance(packet,BLEFindInformationResponse):
packet.packet /= ATT_Find_Information_Response(bytes([packet.format]) + packet.data)
elif isinstance(packet,BLEWriteRequest):
packet.packet /= ATT_Write_Request(gatt_handle=packet.handle,data=packet.value)
elif isinstance(packet,BLEWriteCommand):
packet.packet /= ATT_Write_Command(gatt_handle=packet.handle,data=packet.value)
elif isinstance(packet,BLEWriteResponse):
packet.packet /= ATT_Write_Response()
elif isinstance(packet,BLEReadRequest):
packet.packet /= ATT_Read_Request(gatt_handle=packet.handle)
elif isinstance(packet,BLEReadResponse):
packet.packet /= ATT_Read_Response(value=packet.value)
if self.interface[-5:] == ".pcap" and packet.additionalInformations is not None:
packet.packet = BTLE_PPI(
rssi_count = packet.additionalInformations.rssi_count,
rssi_avg = packet.additionalInformations.rssi_avg,
rssi_min = packet.additionalInformations.rssi_min,
rssi_max=packet.additionalInformations.rssi_max,
btle_clk_100ns = packet.additionalInformations.clk_100ns,
btle_clkn_high = packet.additionalInformations.clkn_high,
btle_channel=packet.additionalInformations.channel)/packet.packet
return packet.packet
class BLEReceiver(wireless.Receiver):
'''
This class is a Receiver for the Bluetooth Low Energy protocol ("ble").
It can instantiate the following devices:
* HCI Device (``mirage.libs.ble.BLEHCIDevice``) **[ interface "hciX" (e.g. "hci0") ]**
* Ubertooth Device (``mirage.libs.ble_utils.ubertooth.BLEUbertoothDevice``) **[ interface "ubertoothX" (e.g. "ubertooth0") ]**
* BTLEJack Device (``mirage.libs.ble_utils.btlejack.BTLEJackDevice``) **[ interface "microbitX" (e.g. "microbit0") ]**
* ADB Device (``mirage.libs.ble_utils.adb.ADBDevice``) **[ interface "adbX" (e.g. "adb0") ]**
* HCIDump Device (``mirage.libs.ble_utils.hcidump.BLEHcidumpDevice``) **[ interface "hcidumpX" (e.g. "hcidump0") ]**
* PCAP Device (``mirage.libs.ble_utils.pcap.BLEPCAPDevice``) **[ interface "<file>.pcap" (e.g. "pairing.pcap") ]**
'''
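# Editor's sketch (not part of the original module): a minimal usage example,
# assuming the generic mirage Receiver API for callback registration; the
# interface name and the callback are placeholders.
#
#   receiver = BLEReceiver(interface="hci0")
#   receiver.onEvent("*", callback=lambda pkt: pkt.show())
#   # frames captured by the device are turned into BLE* packet classes
#   # by convert() below before being dispatched to callbacks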
def __init__(self,interface="hci0"):
deviceClass = None
self.encrypted = False
if "hcidump" in interface:
deviceClass = BLEHcidumpDevice
elif "hci" in interface:
deviceClass = BLEHCIDevice
elif "ubertooth" in interface:
deviceClass = BLEUbertoothDevice
elif "microbit" in interface:
deviceClass = BTLEJackDevice
elif "adb" in interface:
deviceClass = ADBDevice
elif "nrfsniffer" in interface:
deviceClass = NRFSnifferDevice
elif interface[-5:] == ".pcap":
deviceClass = BLEPCAPDevice
self.cryptoInstance = BLELinkLayerCrypto.getInstance()
# Fragment related
self.fragmentBuffer = b""
self.fragmentTotalSize = 0
super().__init__(interface=interface, packetType=BLEPacket, deviceType=deviceClass)
def stop(self):
self.encrypted = False
super().stop()
if self.isDeviceUp() and "hci" in self.interface and not "hcidump" in self.interface:
self.device._exitListening()
def convert(self,packet):
cryptoInstance = BLELinkLayerCrypto.getInstance()
if cryptoInstance is not None and cryptoInstance.ready and BTLE_DATA in packet and packet.LLID > 1:
plain, success = cryptoInstance.tryToDecrypt(raw(packet[BTLE_DATA:]))
if success:
packet[BTLE_DATA] = BTLE_DATA(plain)
new = BLEPacket()
new.packet = packet
if "hci" in self.interface or "adb" in self.interface:
#packet.show()
# Here, we have a start of fragmented HCI packet (L2CAP length > HCI length)
if packet.type == TYPE_ACL_DATA and packet.PB == 2 and L2CAP_Hdr in packet and packet[L2CAP_Hdr].len > packet[HCI_ACL_Hdr].len:
# store it in the buffer
self.fragmentBuffer = raw(packet)
self.fragmentTotalSize = packet[L2CAP_Hdr].len
# don't return it now, it's not ready
return None
# Here, we have the next fragment (PB = 1)
if packet.type == TYPE_ACL_DATA and packet.PB == 1 and L2CAP_Hdr in packet and len(self.fragmentBuffer) > 0:
# We create the scapy packet before the last fragment
previousPacket = HCI_Hdr(self.fragmentBuffer)
# We concatenate it to the previous fragments
self.fragmentBuffer += raw(packet[L2CAP_Hdr:])
# If we have received all fragments
if len(raw(previousPacket[L2CAP_Hdr:][1:])) + len(raw(packet[L2CAP_Hdr:])) == self.fragmentTotalSize:
# We create the full packet and the execution flow continues to dissect it
packet = HCI_Hdr(self.fragmentBuffer)
new.packet = packet
else:
# don't return it now, it's not ready
return None
if packet.type == TYPE_ACL_DATA:
if ATT_Exchange_MTU_Request in packet:
return BLEExchangeMTURequest(
mtu = packet[ATT_Exchange_MTU_Request].mtu,
connectionHandle = packet.handle
)
elif ATT_Error_Response in packet:
return BLEErrorResponse(
request = packet.request,
handle = packet[ATT_Error_Response].handle,
ecode = packet.ecode,
connectionHandle = packet.handle
)
elif ATT_Exchange_MTU_Response in packet:
return BLEExchangeMTUResponse(
mtu = packet[ATT_Exchange_MTU_Response].mtu,
connectionHandle = packet.handle
)
elif ATT_Read_Response in packet:
return BLEReadResponse(
value = packet[ATT_Read_Response].value,
connectionHandle = packet.handle
)
elif ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0xb:
return BLEReadResponse(
value = b"",
connectionHandle = packet.handle
)
elif ATT_Read_Request in packet:
return BLEReadRequest(
handle = packet[ATT_Read_Request].gatt_handle,
connectionHandle = packet.handle
)
elif ATT_Read_By_Group_Type_Response in packet:
return BLEReadByGroupTypeResponse(
connectionHandle = packet.handle,
length = packet[ATT_Read_By_Group_Type_Response].length,
data = packet[ATT_Read_By_Group_Type_Response].data
)
elif ATT_Read_By_Group_Type_Request in packet:
return BLEReadByGroupTypeRequest(
connectionHandle = packet.handle,
startHandle = packet[ATT_Read_By_Group_Type_Request].start,
endHandle = packet[ATT_Read_By_Group_Type_Request].end,
uuid =packet[ATT_Read_By_Group_Type_Request].uuid
)
elif ATT_Read_By_Type_Response in packet:
return BLEReadByTypeResponse(
connectionHandle = packet.handle,
data = bytes(packet[ATT_Read_By_Type_Response])
)
elif ATT_Read_By_Type_Request in packet:
return BLEReadByTypeRequest(
connectionHandle = packet.handle,
startHandle = packet[ATT_Read_By_Type_Request].start,
endHandle = packet[ATT_Read_By_Type_Request].end,
uuid=packet[ATT_Read_By_Type_Request].uuid
)
elif New_ATT_Read_Blob_Request in packet:
return BLEReadBlobRequest(
handle = packet[New_ATT_Read_Blob_Request].gatt_handle,
offset = packet[New_ATT_Read_Blob_Request].offset,
connectionHandle = packet.handle
)
elif New_ATT_Read_Blob_Response in packet:
return BLEReadBlobResponse(
value = packet[New_ATT_Read_Blob_Response].value,
connectionHandle = packet.handle
)
elif New_ATT_Handle_Value_Notification in packet:
return BLEHandleValueNotification(
connectionHandle = packet.handle,
handle = packet[New_ATT_Handle_Value_Notification].gatt_handle,
value = packet[New_ATT_Handle_Value_Notification].value
)
elif New_ATT_Handle_Value_Indication in packet:
return BLEHandleValueIndication(
connectionHandle = packet.handle,
handle = packet[New_ATT_Handle_Value_Indication].gatt_handle,
value = packet[New_ATT_Handle_Value_Indication].value
)
elif New_ATT_Handle_Value_Confirmation in packet or (ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0x1e):
return BLEHandleValueConfirmation(connectionHandle = packet.handle)
elif ATT_Write_Response in packet or (ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0x13):
return BLEWriteResponse(connectionHandle = packet.handle)
elif ATT_Write_Request in packet:
return BLEWriteRequest(
connectionHandle = packet.handle,
handle = packet.gatt_handle,
value = packet.data
)
elif ATT_Write_Command in packet:
return BLEWriteCommand(
connectionHandle = packet.handle,
handle = packet.gatt_handle,
value = packet.data
)
elif ATT_Find_Information_Request in packet:
return BLEFindInformationRequest(
connectionHandle = packet.handle,
startHandle=packet.start,
endHandle=packet.end
)
elif ATT_Find_Information_Response in packet:
return BLEFindInformationResponse(
connectionHandle = packet.handle,
data=bytes(packet[ATT_Find_Information_Response])[1:],
format=packet.format
)
elif SM_Security_Request in packet:
return BLESecurityRequest(
connectionHandle = packet.handle,
authentication = packet.authentication)
elif SM_Pairing_Request in packet:
return BLEPairingRequest(
connectionHandle = packet.handle,
inputOutputCapability=packet.iocap,
outOfBand=packet.oob == 1,
authentication=packet.authentication,
initiatorKeyDistribution=packet.initiator_key_distribution,
responderKeyDistribution=packet.responder_key_distribution,
payload=raw(packet[SM_Hdr:]))
elif SM_Pairing_Response in packet:
return BLEPairingResponse(
connectionHandle = packet.handle,
inputOutputCapability=packet.iocap,
outOfBand=packet.oob == 1,
authentication=packet.authentication,
initiatorKeyDistribution=packet.initiator_key_distribution,
responderKeyDistribution=packet.responder_key_distribution,
payload=raw(packet[SM_Hdr:]))
elif SM_Failed in packet:
return BLEPairingFailed(
connectionHandle = packet.handle,
reason=packet.reason)
elif SM_Confirm in packet:
return BLEPairingConfirm(
connectionHandle = packet.handle,
confirm=packet.confirm)
elif SM_Random in packet:
return BLEPairingRandom(
connectionHandle = packet.handle,
random=packet.random)
elif SM_Encryption_Information in packet:
return BLEEncryptionInformation(
connectionHandle = packet.handle,
ltk=packet.ltk)
elif SM_Master_Identification in packet:
return BLEMasterIdentification(
connectionHandle = packet.handle,
ediv=packet.ediv,
rand=packet.rand)
elif SM_Identity_Information in packet:
return BLEIdentityInformation(
connectionHandle = packet.handle,
irk=packet.irk)
elif SM_Identity_Address_Information in packet:
return BLEIdentityAddressInformation(
connectionHandle = packet.handle,
type="public" if packet.atype == 0 else "random",
address=packet.address)
elif SM_Signing_Information in packet:
return BLESigningInformation(
connectionHandle = packet.handle,
csrk=packet.csrk)
elif ATT_Find_By_Type_Value_Request in packet:
return BLEFindByTypeValueRequest(
startHandle=packet[ATT_Find_By_Type_Value_Request].start,
endHandle=packet[ATT_Find_By_Type_Value_Request].end,
uuid=packet[ATT_Find_By_Type_Value_Request].uuid,
data=packet[ATT_Find_By_Type_Value_Request].data)
elif ATT_Find_By_Type_Value_Response in packet:
return BLEFindByTypeValueResponse(
handles=packet[ATT_Find_By_Type_Value_Response].handles)
elif L2CAP_Connection_Parameter_Update_Request in packet:
return BLEConnectionParameterUpdateRequest(
l2capCmdId = packet.id,
connectionHandle = packet.handle,
maxInterval=packet.max_interval,
minInterval=packet.min_interval,
timeoutMult=packet.timeout_mult,
slaveLatency=packet.slave_latency
)
elif L2CAP_Connection_Parameter_Update_Response in packet:
return BLEConnectionParameterUpdateResponse(
l2capCmdId = packet.id,
connectionHandle = packet.handle,
moveResult=packet.move_result
)
return new
elif packet.type == TYPE_HCI_COMMAND:
if HCI_Cmd_LE_Create_Connection in packet:
return BLEConnect(
dstAddr = packet.paddr,
type="public" if packet.patype == 0 else "random",
initiatorType = "public" if packet.atype == 0 else "random")
elif HCI_Cmd_LE_Create_Connection_Cancel in packet:
return BLEConnectionCancel()
elif L2CAP_Connection_Parameter_Update_Request in packet:
return BLEConnectionParameterUpdateRequest(
l2capCmdId = packet.id,
connectionHandle = packet.handle,
maxInterval = packet.max_interval,
minInterval = packet.min_interval,
slaveLatency = packet.slave_latency,
timeoutMult=packet.timeout_mult
)
elif L2CAP_Connection_Parameter_Update_Response in packet:
return BLEConnectionParameterUpdateResponse(
l2capCmdId = packet.id,
connectionHandle = packet.handle,
moveResult=packet.move_result)
elif HCI_Cmd_LE_Start_Encryption_Request in packet:
return BLELongTermKeyRequest(
connectionHandle = packet.handle,
rand = packet.rand,
ediv = packet.ediv,
ltk=packet.ltk)
elif HCI_Cmd_LE_Long_Term_Key_Request_Reply in packet:
return BLELongTermKeyRequestReply(
connectionHandle = packet.handle,
ltk=packet.ltk,
positive=True)
elif HCI_Cmd_LE_Long_Term_Key_Request_Negative_Reply in packet:
return BLELongTermKeyRequestReply(connectionHandle = packet.handle,positive=False)
elif packet.type == TYPE_HCI_EVENT:
if packet.code == HCI_LE_META:
if packet.event == HCI_ENHANCED_CONNECTION_COMPLETE and packet.status == 0x0:
newHandle = packet[HCI_LE_Meta_Enhanced_Connection_Complete].handle
newAddress = str(packet[HCI_LE_Meta_Enhanced_Connection_Complete].paddr)
self.device._setCurrentHandle(newHandle,address=newAddress,mode="public" if packet.patype == 0 else "random")
io.info('Updating connection handle : '+str(newHandle))
return BLEConnectResponse(
srcAddr = packet.paddr,
dstAddr = '',
role="master" if packet.role==0 else "slave",
success=True,
type="public" if packet.patype == 0 else "random",
interval = packet.interval
)
if packet.event == HCI_CONNECTION_COMPLETE and packet.status == 0x0:
newHandle = packet[HCI_LE_Meta_Connection_Complete].handle
newAddress = str(packet[HCI_LE_Meta_Connection_Complete].paddr)
self.device._setCurrentHandle(newHandle,address=newAddress,mode="public" if packet.patype == 0 else "random")
io.info('Updating connection handle : '+str(newHandle))
return BLEConnectResponse(
srcAddr = packet.paddr,
dstAddr = '',
role="master" if packet.role==0 else "slave",
success=True,
type="public" if packet.patype == 0 else "random",
interval = packet.interval
)
elif packet.event == HCI_ADVERTISING_REPORT:
layer = packet[HCI_LE_Meta_Advertising_Report]
type = "SCAN_RSP" if layer.type == SCAN_RSP else "ADV_IND"
return BLEAdvertisement(
addr = layer.addr,
addrType = layer.atype,
data=layer.data,
type=type
)
elif packet.event == HCI_LONG_TERM_KEY_REQUEST or HCI_LE_Meta_Long_Term_Key_Request in packet:
return BLELongTermKeyRequest(
connectionHandle=packet.handle,
rand=packet.rand,
ediv=packet.ediv
)
elif packet.code == HCI_DISCONNECTION_COMPLETE:
handle = packet.handle
self.device._removeConnectionHandle(handle)
return BLEDisconnect(connectionHandle=handle)
else:
return None
elif "ubertooth" in self.interface or "microbit" in self.interface or "nrfsniffer" in self.interface or self.interface[-5:] == ".pcap":
try:
if ((cryptoInstance is None) or (cryptoInstance is not None and not cryptoInstance.ready)) and self.encrypted:
new = BLEEncryptedPacket(connectionHandle = 1, data = bytes(packet[BTLE_DATA]))
else:
if BTLE_ADV in packet:
if BTLE_CONNECT_REQ in packet:
new = BLEConnectRequest(
srcAddr=packet.InitA,
dstAddr=packet.AdvA,
srcAddrType="public" if 0 == packet.TxAdd else "random",
dstAddrType="public" if 0 == packet.RxAdd else "random",
accessAddress=packet.AA,
crcInit=packet.crc_init,
winSize=packet.win_size,
winOffset=packet.win_offset,
hopInterval=packet.interval,
latency=packet.latency,
timeout=packet.timeout,
channelMap =packet.chM,
SCA=packet.SCA,
hopIncrement=packet.hop,
data=raw(packet[BTLE_ADV])
)
else:
try:
advType = ADV_TYPES[packet.PDU_type]
except:
advType = "???"
try:
data = packet.data
except:
data = b""
if advType == "CONNECT_REQ":
new = BLEConnectRequest(
srcAddr=packet.InitA,
dstAddr=packet.AdvA,
srcAddrType="public" if 0 == packet.TxAdd else "random",
dstAddrType="public" if 0 == packet.RxAdd else "random",
accessAddress=packet.AA,
crcInit=packet.crc_init,
winSize=packet.win_size,
winOffset=packet.win_offset,
hopInterval=packet.interval,
latency=packet.latency,
timeout=packet.timeout,
channelMap =packet.chM,
SCA=packet.SCA,
hopIncrement=packet.hop,
data=data
)
elif advType == "ADV_IND":
new = BLEAdvInd(
addr=packet.AdvA,
addrType="public" if 0 == packet.TxAdd else "random",
data=data)
elif advType == "ADV_DIRECT_IND":
new = BLEAdvDirectInd(
srcAddr=packet.AdvA,
srcAddrType="public" if 0 == packet.TxAdd else "random",
dstAddr=packet.InitA,
dstAddrType="public" if 0 == packet.RxAdd else "random")
elif advType == "ADV_NONCONN_IND":
new = BLEAdvNonConnInd()
elif advType == "ADV_SCAN_IND":
new = BLEAdvScanInd()
elif advType == "SCAN_REQ":
new = BLEScanRequest(
srcAddr=packet.ScanA,
srcAddrType="public" if 0 == packet.TxAdd else "random",
dstAddr=packet.AdvA,
dstAddrType="public" if 0 == packet.RxAdd else "random")
elif advType == "SCAN_RSP":
new = BLEScanResponse(
addr=packet.AdvA,
addrType="public" if 0 == packet.TxAdd else "random",
data=data)
else:
new = BLEAdvertisement( addr = packet.AdvA,
addrType=packet.RxAdd,
data=data,
type=advType)
elif packet.LLID == 1:
new = BLEEmptyPDU()
elif packet.LLID == 2:
if ATT_Exchange_MTU_Request in packet:
new = BLEExchangeMTURequest(
mtu = packet[ATT_Exchange_MTU_Request].mtu
)
elif ATT_Error_Response in packet:
new = BLEErrorResponse(
request = packet.request,
handle = packet[ATT_Error_Response].handle,
ecode = packet.ecode
)
elif ATT_Exchange_MTU_Response in packet:
new = BLEExchangeMTUResponse(
mtu = packet[ATT_Exchange_MTU_Response].mtu
)
elif ATT_Read_Response in packet :
new = BLEReadResponse(
value = packet[ATT_Read_Response].value
)
elif ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0xb:
new = BLEReadResponse(
value = b""
)
elif ATT_Read_Request in packet:
new = BLEReadRequest(
handle = packet[ATT_Read_Request].gatt_handle
)
elif ATT_Read_By_Group_Type_Response in packet:
new = BLEReadByGroupTypeResponse(
length = packet[ATT_Read_By_Group_Type_Response].length,
data = packet[ATT_Read_By_Group_Type_Response].data
)
elif ATT_Read_By_Group_Type_Request in packet:
new = BLEReadByGroupTypeRequest(
startHandle = packet[ATT_Read_By_Group_Type_Request].start,
endHandle = packet[ATT_Read_By_Group_Type_Request].end,
uuid =packet[ATT_Read_By_Group_Type_Request].uuid
)
elif ATT_Read_By_Type_Response in packet:
new = BLEReadByTypeResponse(
data = bytes(packet[ATT_Read_By_Type_Response])
)
elif ATT_Read_By_Type_Request in packet:
new = BLEReadByTypeRequest(
startHandle = packet[ATT_Read_By_Type_Request].start,
endHandle = packet[ATT_Read_By_Type_Request].end,
uuid=packet[ATT_Read_By_Type_Request].uuid
)
elif New_ATT_Handle_Value_Notification in packet:
new = BLEHandleValueNotification(
handle = packet[New_ATT_Handle_Value_Notification].gatt_handle,
value = packet[New_ATT_Handle_Value_Notification].value
)
elif New_ATT_Handle_Value_Indication in packet:
new = BLEHandleValueIndication(
connectionHandle = packet.handle,
handle = packet[New_ATT_Handle_Value_Indication].gatt_handle,
value = packet[New_ATT_Handle_Value_Indication].value
)
elif New_ATT_Handle_Value_Confirmation in packet or (ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0x1e):
new = BLEHandleValueConfirmation(connectionHandle = packet.handle)
elif New_ATT_Read_Blob_Request in packet:
new = BLEReadBlobRequest(
handle = packet[New_ATT_Read_Blob_Request].gatt_handle,
offset = packet[New_ATT_Read_Blob_Request].offset
)
elif New_ATT_Read_Blob_Response in packet:
new = BLEReadBlobResponse(
value = packet[New_ATT_Read_Blob_Response].value
)
elif ATT_Write_Response in packet or (ATT_Hdr in packet and packet[ATT_Hdr].opcode == 0x13):
new = BLEWriteResponse()
elif ATT_Write_Request in packet:
new = BLEWriteRequest(
handle = packet.gatt_handle,
value = packet.data
)
elif ATT_Write_Command in packet:
new = BLEWriteCommand(
handle = packet.gatt_handle,
value = packet.data
)
elif ATT_Find_Information_Request in packet:
new = BLEFindInformationRequest(
startHandle=packet.start,
endHandle=packet.end
)
elif ATT_Find_Information_Response in packet:
new = BLEFindInformationResponse(
data=bytes(packet[ATT_Find_Information_Response])[1:],
format=packet.format
)
elif SM_Security_Request in packet:
return BLESecurityRequest(
connectionHandle = packet.handle,
authentication = packet.authentication)
elif SM_Pairing_Request in packet:
new = BLEPairingRequest(
inputOutputCapability=packet.iocap,
outOfBand=packet.oob == 1,
authentication=packet.authentication,
initiatorKeyDistribution=packet.initiator_key_distribution,
responderKeyDistribution=packet.responder_key_distribution,
payload=raw(packet[SM_Hdr:]))
elif SM_Pairing_Response in packet:
new = BLEPairingResponse(
inputOutputCapability=packet.iocap,
outOfBand=packet.oob == 1,
authentication=packet.authentication,
initiatorKeyDistribution=packet.initiator_key_distribution,
responderKeyDistribution=packet.responder_key_distribution,
payload=raw(packet[SM_Hdr:]))
elif SM_Failed in packet:
new = BLEPairingFailed(reason=packet.reason)
elif SM_Confirm in packet:
new = BLEPairingConfirm(confirm=packet.confirm)
elif SM_Random in packet:
new = BLEPairingRandom(random=packet.random)
elif SM_Encryption_Information in packet:
new = BLEEncryptionInformation(ltk=packet.ltk)
elif SM_Master_Identification in packet:
new = BLEMasterIdentification(
ediv=packet.ediv,
rand=packet.rand)
elif SM_Identity_Information in packet:
new = BLEIdentityInformation(irk=packet.irk)
elif SM_Identity_Address_Information in packet:
new = BLEIdentityAddressInformation(
type="public" if packet.atype == 0 else "random",
address=packet.address)
elif SM_Signing_Information in packet:
new = BLESigningInformation(
csrk=packet.csrk)
elif ATT_Find_By_Type_Value_Request in packet:
new = BLEFindByTypeValueRequest(
startHandle=packet[ATT_Find_By_Type_Value_Request].start,
endHandle=packet[ATT_Find_By_Type_Value_Request].end,
uuid=packet[ATT_Find_By_Type_Value_Request].uuid,
data=packet[ATT_Find_By_Type_Value_Request].data)
elif ATT_Find_By_Type_Value_Response in packet:
new = BLEFindByTypeValueResponse(handles=packet[ATT_Find_By_Type_Value_Response].handles)
elif L2CAP_Connection_Parameter_Update_Request in packet:
new = BLEConnectionParameterUpdateRequest(
maxInterval=packet.max_interval,
minInterval=packet.min_interval,
timeoutMult=packet.timeout_mult,
slaveLatency=packet.slave_latency
)
elif L2CAP_Connection_Parameter_Update_Response in packet:
new = BLEConnectionParameterUpdateResponse(
moveResult=packet.move_result
)
elif packet.LLID == 3:
try:
controlType = CONTROL_TYPES[packet.optcode]
except:
controlType = "???"
try:
data = bytes(packet[ControlPDU:])[1:]
except:
data = b""
if controlType == "LL_ENC_REQ":
#packet.show()
if cryptoInstance is not None:
cryptoInstance.setMasterValues(packet.skd,packet.iv)
elif controlType == "LL_ENC_RSP":
#packet.show()
if cryptoInstance is not None:
cryptoInstance.setSlaveValues(packet.skd,packet.iv)
elif controlType == "LL_START_ENC_REQ":
self.encrypted = True
if cryptoInstance is not None:
cryptoInstance.generateSessionKey()
new = BLEControlPDU(type=controlType,data=data)
except:
new = BLEPacket()
new.packet = packet
if "ubertooth" in self.interface:
new.additionalInformations = BLESniffingParameters(
rssi_min = packet.rssi_min,
rssi_max = packet.rssi_max,
rssi_avg = packet.rssi_avg,
rssi_count = packet.rssi_count,
frequency=packet.channel,
clk_100ns=packet.clk_100ns,
clkn_high=packet.clkn_high
)
elif "microbit" in self.interface:
new.additionalInformations = BLESniffingParameters(
rssi = packet.rssi_avg,
rssi_count = packet.rssi_count,
clk_100ns = packet.btle_clk_100ns,
clkn_high = packet.btle_clkn_high,
channel = packet.btle_channel
)
elif "nrfsniffer" in self.interface:
new.additionalInformations = BLESniffingParameters(
rssi = packet.rssi_avg,
rssi_count = packet.rssi_count,
clk_100ns = packet.btle_clk_100ns,
clkn_high = packet.btle_clkn_high,
channel = packet.btle_channel
)
elif ".pcap" in self.interface:
new.additionalInformations = BLESniffingParameters(
rssi = packet.rssi_avg,
rssi_count = packet.rssi_count,
clk_100ns = packet.btle_clk_100ns,
clkn_high = packet.btle_clkn_high,
channel = packet.btle_channel
)
return new
WirelessModule.registerEmitter("ble",BLEEmitter)
WirelessModule.registerReceiver("ble",BLEReceiver)
| 37.117845
| 227
| 0.688879
|
5e7cadc00efd429364fb4288e669d962d758488b
| 5,067
|
py
|
Python
|
odziez/orders/views.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
odziez/orders/views.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
odziez/orders/views.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
from django.core.mail import EmailMessage
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse_lazy
from django.utils.timezone import localdate
from django.views import generic
from clothes.models import Clothe, KindOfClothe
from employees.models import Employee, Manager, Supervisor
from .forms import OrderSendToSupervisorUpdateForm
from .models import Order
class OrdersListView(generic.ListView):
context_object_name = 'orders'
model = Order
template_name = "orders/orders.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['order_during_composing'] = self.queryset.filter(during_composing = True).exists()
context['orders_sent_to_supervisor'] = self.queryset.filter(sent_to_supervisor = True).exists()
context['orders_waiting_for_delivery'] = self.queryset.filter(sent_to_manufacturer = True).exists()
return context
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
manager = Manager.objects.get(pk = self.request.user.manager.pk)
self.queryset = Order.objects.all().filter(manager = manager)
return self.queryset
class OrderNextOrSendTemplateView(generic.TemplateView):
template_name = "orders/order-next-or-send.html"
class OrdersArchivedListView(generic.ListView):
model = Order
template_name = "orders/archived.html"
class OrdersPreparedTemplateView(generic.TemplateView):
template_name = "orders/prepared.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['order'] = self.get_order()
context['clothes'] = self.get_clothes()
return context
def get_order(self):
orders = Order.objects.all()
orders = orders.filter(
manager = Manager.objects.get(pk = self.request.user.manager.pk)
)
orders = orders.select_related('place_of_delivery', )
self.order = orders.get(during_composing = True)
return self.order
def get_clothes(self):
clothes = Clothe.objects.all()
clothes = clothes.filter(order = self.order)
clothes = clothes.select_related('employee', 'kind', )
return clothes.values(
'id',
'kind__name',
'employee__name',
'employee__surname',
)
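# Editor's note (illustrative, not in the original file): because of .values(),
# the queryset yields plain dicts rather than Clothe instances, e.g. (made-up values):
#   {'id': 3, 'kind__name': 'jacket', 'employee__name': 'Jan', 'employee__surname': 'Kowalski'}
# so templates consuming context['clothes'] index entries by these lookup keys.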
class OrderSendUpdateView(generic.UpdateView):
form_class = OrderSendToSupervisorUpdateForm
model = Order
success_url = reverse_lazy('employees:employees')
template_name = "orders/send-to-supervisor.html"
def form_valid(self, form):
form.instance.during_composing = False
form.instance.composed = True
form.instance.sent_to_supervisor = True
form.instance.date_of_sending_to_supervisor = localdate()
#supervisor = Supervisor.objects.first()
#email = EmailMessage(
# subject = 'Zamówienie odzieży roboczej',
# body = 'W aplikacji jest nowe zamówienie',
# to = [supervisor.email],
# )
#email.send()
print('------ date ', form.instance.date_of_sending_to_supervisor)
return super().form_valid(form)
class OrdersAtSupervisorListView(generic.ListView):
context_object_name = 'orders'
model = Order
template_name = 'orders/at-supervisor.html'
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
manager = Manager.objects.get(pk = self.request.user.manager.pk)
queryset = Order.objects.all().filter(manager = manager)
queryset = queryset.filter(sent_to_supervisor = True)
queryset = queryset.filter(approved_by_supervisor = False)
return queryset
class OrdersAtSupervisorDetailView(generic.DetailView):
context_object_name = 'order'
model = Order
template_name = 'orders/at-supervisor-detail.html'
def get_clothes(self):
clothes = Clothe.objects.all()
clothes = clothes.filter(order = self.get_object())
clothes = clothes.select_related('employee', 'kind', )
return clothes.values(
'id',
'kind__name',
'employee__name',
'employee__surname',
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['clothes'] = self.get_clothes()
return context
class OrderSentToManufacturerListView(generic.ListView):
context_object_name = 'orders'
model = Order
template_name = 'orders/sent-list.html'
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
manager = Manager.objects.get(pk = self.request.user.manager.pk)
queryset = Order.objects.all().filter(manager = manager)
queryset = queryset.filter(sent_to_manufacturer = True)
return queryset
class OrderSentDetailView(generic.DetailView):
context_object_name = 'order'
model = Order
template_name = 'orders/sent-detail.html'
| 34.944828
| 107
| 0.677916
|
90ca26646cd80fbdd4e99fc91026282aec822442
| 7,379
|
py
|
Python
|
investmentportfolio.py
|
mikolajroszak/Analyze-Investment-Portfolio
|
6e3c30c9e6c96e007e1a81053cccfee52996b714
|
[
"IBM-pibs"
] | 14
|
2018-10-15T07:12:53.000Z
|
2021-09-23T02:29:29.000Z
|
investmentportfolio.py
|
mikolajroszak/Analyze-Investment-Portfolio
|
6e3c30c9e6c96e007e1a81053cccfee52996b714
|
[
"IBM-pibs"
] | null | null | null |
investmentportfolio.py
|
mikolajroszak/Analyze-Investment-Portfolio
|
6e3c30c9e6c96e007e1a81053cccfee52996b714
|
[
"IBM-pibs"
] | 13
|
2018-08-24T20:42:52.000Z
|
2020-10-01T04:33:51.000Z
|
import requests
import json
import argparse
from dotenv import load_dotenv
import os
import datetime
# Initialize Investment Portfolio Service credentials from Bluemix (VCAP_SERVICES) if available, otherwise from the .env file
if 'VCAP_SERVICES' in os.environ:
vcap_servicesData = json.loads(os.environ['VCAP_SERVICES'])
# Log the fact that we successfully found some service information.
print("Got vcap_servicesData\n")
#print(vcap_servicesData)
# Look for the IP service instance.
IP_W_username=vcap_servicesData['fss-portfolio-service'][0]['credentials']['writer']['userid']
IP_W_password=vcap_servicesData['fss-portfolio-service'][0]['credentials']['writer']['password']
IP_R_username=vcap_servicesData['fss-portfolio-service'][0]['credentials']['reader']['userid']
IP_R_password=vcap_servicesData['fss-portfolio-service'][0]['credentials']['reader']['password']
# Log the fact that we successfully found credentials
print("Got IP credentials\n")
else:
load_dotenv(os.path.join(os.path.dirname(__file__), ".env"))
IP_W_username=os.environ.get("CRED_PORTFOLIO_USERID_W")
IP_W_password=os.environ.get("CRED_PORTFOLIO_PWD_W")
IP_R_username=os.environ.get("CRED_PORTFOLIO_USERID_R")
IP_R_password=os.environ.get("CRED_PORTFOLIO_PWD_R")
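# Editor's note: a hypothetical .env layout matching the variables read above
# (placeholder values, not real credentials):
#
# CRED_PORTFOLIO_USERID_W=writer-userid
# CRED_PORTFOLIO_PWD_W=writer-password
# CRED_PORTFOLIO_USERID_R=reader-userid
# CRED_PORTFOLIO_PWD_R=reader-password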
def Get_Portfolios(name=""):
"""
Retrieves portfolio data by calling the Investment Portfolio service.
"""
print ("Get Portfolios")
#call the url
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + name
headers = {
'accept': "application/json",
'content-type': "application/json"
}
get_data = requests.get(BASEURL, auth=(IP_R_username, IP_R_password), headers=headers)
print("Investment Portfolio status: " + str(get_data.status_code))
# return json data
data = get_data.json()
return data
def Get_Portfolio_Holdings(Portfolio,latest=True):
"""
Retrieves holdings data from the Investment Portfolio service for the given portfolio.
"""
print ("Get Portfolio Holdings for " + Portfolio)
#construct the url
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + Portfolio + "/holdings"
if latest:
BASEURL += "?latest=true"
#call the url
headers = {
'accept': "application/json",
'content-type': "application/json"
}
get_data = requests.get(BASEURL, auth=(IP_R_username, IP_R_password), headers=headers)
print("Investment Portfolio - Get Portfolio Holdings status: " + str(get_data.status_code))
#return json data
data = get_data.json()
return data
def Get_Portfolios_by_Selector(selector,value):
"""
Retrieves portfolios matching a data selector by calling the Investment Portfolio service.
"""
print ("Get Portfolios by Selector")
#call the url
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/_find"
headers = {
'accept': "application/json",
'content-type': "application/json"
}
s = {
'dataSelector':{
selector:value
}
}
get_data = requests.post(BASEURL, auth=(IP_R_username, IP_R_password), headers=headers, data=json.dumps(s))
print("Investment Portfolio status: " + str(get_data.status_code))
# return json data
data = get_data.json()
return data
def Get_Holdings_by_Selector(portfolio,selector,value):
"""
Retrieves portfolio holdings matching a data selector by calling the Investment Portfolio service.
"""
print ("Get Holdings by Selector")
#call the url
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + portfolio + "/holdings/_find"
headers = {
'accept': "application/json",
'content-type': "application/json"
}
s = {
'dataSelector':{
str(selector):str(value)
}
}
get_data = requests.post(BASEURL, auth=(IP_R_username, IP_R_password), headers=headers, data=json.dumps(s))
print("Investment Portfolio status: " + str(get_data.status_code))
# return json data
data = get_data.json()
return data
def Create_Portfolio(Portfolio):
"""
Creates a portfolio in the database.
"""
print('Create_Portfolio')
#construct the url
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios"
headers = {
'Content-Type': "application/json",
'Accept': "application/json"
}
get_data = requests.post(BASEURL, auth=(IP_W_username, IP_W_password), headers=headers, data=json.dumps(Portfolio))
#print the status and returned json
status = get_data.status_code
print("Investment Portfolio status: " + str(status))
if status != 200:
return get_data
else:
data = get_data.json()
return json.dumps(data, indent=4, sort_keys=True)
def Create_Portfolio_Holdings(portfolio_name,holdings):
"""
Creates portfolio holdings.
"""
print('Create_Portfolio holdings')
timestamp = '{:%Y-%m-%dT%H:%M:%S.%fZ}'.format(datetime.datetime.now())
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + portfolio_name + "/holdings"
headers = {
'Content-Type': "application/json",
'Accept': "application/json"
}
data = {
'timestamp': timestamp,
'holdings': holdings
}
get_data = requests.post(BASEURL, auth=(IP_W_username, IP_W_password), headers=headers, data=json.dumps(data))
#print the status and returned json
status = get_data.status_code
print("Investment Portfolio Holding status: " + str(status))
if status != 200:
return get_data.json()
else:
data = get_data.json()
return json.dumps(data, indent=4, sort_keys=True)
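# Editor's sketch (not part of the original file): one way Create_Portfolio_Holdings
# might be called; the holding fields shown are assumptions about the service schema,
# not documented values.
#
#   holdings = [{"asset": "IBM", "quantity": 10, "companyName": "IBM Corp"}]
#   Create_Portfolio_Holdings("MyPortfolio", holdings)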
def Delete_Portfolio(portfolio_name,timestamp,rev):
"""
Deletes a portfolio.
"""
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + str(portfolio_name) + "/" + str(timestamp) + "?rev=" + str(rev)
headers = {
'Content-Type': "application/json",
'Accept': "application/json",
'Authorization':"Basic aGV5cmVsc2VuZG9udHJhdGlyc2VudWVuOjM4NDUzNTZjNzY2NTY4NTA0YjkyYzM3ZDJiOGVkZTkzZWYzMTg0NTA="
}
res = requests.delete(BASEURL, auth=(IP_W_username, IP_W_password), headers=headers)
#print the status and returned json
status = res.status_code
print("Investment Portfolio delete status: " + str(status))
if status != 200:
return res
else:
return "Portfolio " + portfolio_name + " deleted successfully."
def Delete_Portfolio_Holdings(portfolio_name,timestamp,rev):
"""
Deletes portfolio holdings.
"""
BASEURL = "https://investment-portfolio.mybluemix.net/api/v1/portfolios/" + str(portfolio_name) + "/holdings/" + str(timestamp) + "?rev=" + str(rev)
print(BASEURL)
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'Accept': "application/json",
'authorization':'Basic REPLACE_BASIC_AUTH'
}
res = requests.delete(BASEURL, auth=(IP_W_username, IP_W_password), headers=headers)
#print the status and returned json
status = res.status_code
print("Investment Portfolio holdings delete status: " + str(status))
if status != 200:
return res
else:
return "Portfolio " + portfolio_name + " holdings deleted successfully."
| 36.529703
| 152
| 0.675566
|
65551497f8f48ab00d345eb3479d937eb3396b1e
| 592
|
py
|
Python
|
Leetcode-python/_Python3/Threading/thread.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | 3
|
2021-12-17T17:12:23.000Z
|
2022-03-29T13:41:21.000Z
|
Leetcode-python/_Python3/Threading/thread.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | null | null | null |
Leetcode-python/_Python3/Threading/thread.py
|
gnsalok/algo-ds-python
|
6e37f9a536c634673451d9acaeb4968536fb0b8b
|
[
"MIT"
] | null | null | null |
import threading
def printCube(num):
print("Cube: {}\n".format(num*num*num))
def printSquare(num):
print("Square: {}\n".format(num * num))
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(target=printSquare, args=(10,))
t2 = threading.Thread(target=printCube, args=(10,))
# starting thread 1
t1.start()
# starting thread 2
t2.start()
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
t2.join()
# both threads completely executed
print("Done!")
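# Editor's note: expected output for the run above (the Square/Cube lines may
# appear in either order because the two threads run concurrently):
#   Square: 100
#   Cube: 1000
#   Done!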
| 16
| 57
| 0.628378
|
5e0739a7f7e0292c4cbddb47d069074bf392ae45
| 5,511
|
py
|
Python
|
stats/cli/commands.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | null | null | null |
stats/cli/commands.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | null | null | null |
stats/cli/commands.py
|
48ix/stats
|
4b7ae032c4db3d7e01ee48e4af071d793753da1a
|
[
"MIT"
] | 1
|
2020-10-22T00:00:42.000Z
|
2020-10-22T00:00:42.000Z
|
"""CLI Commands & Configuration."""
# Standard Library
import asyncio
# Third Party
from click import CommandCollection, group, option, prompt, argument
# Project
from stats.cli.echo import Echo
echo = Echo()
@group()
def main():
"""Initialize CLI commands."""
pass
def aiorun(coro, *args, **kwargs):
"""Safely await a coroutine with arguments, print a pretty response."""
try:
res = asyncio.run(coro(*args, **kwargs))
return res
except Exception:
echo.console.print_exception()
@main.command()
@argument("port-id")
@option("-t", "--time", help="Number of previous hours to query")
@option("-d", "--direction", required=True, help="In or Out")
@option("-l", "--limit", required=False, default=100, help="Number of entries")
def port_utilization(port_id, time, direction, limit):
"""Get utilization statistics for a port."""
# Project
from stats.actions.utilization import port_utilization_period
echo(
aiorun(
port_utilization_period,
port_id=port_id,
period=time,
direction=direction,
limit=limit,
)
)
@main.command()
@argument("port-id")
@option("-t", "--time", default=1, help="Number of previous hours to query")
@option("-d", "--direction", required=True, help="In or Out")
@option("-l", "--limit", required=False, default=100, help="Number of entries")
def port_average(port_id, time, direction, limit):
"""Get average utilization statistics for a port."""
# Project
from stats.actions.utilization import port_average_period
echo(
aiorun(
port_average_period,
port_id=port_id,
period=time,
direction=direction,
limit=limit,
)
)
@main.command()
@option("-a", "--listen-address", default="::1", help="HTTP Listen Address")
@option("-p", "--listen-port", default=8001, help="HTTP Listen Port")
@option("-d", "--debug", default=False, is_flag=True, help="Enable debugging")
@option("--direct", is_flag=True, default=False, help="Bypass Gunicorn")
def start(listen_address, listen_port, debug, direct):
"""Start the Stats REST API."""
# Project
from stats.main import start as gunicorn_start
from stats.api.main import start as direct_start
mode = "(WSGI)"
if direct:
mode = "(Direct)"
echo("Starting stats API on {}:{} {}...", listen_address, listen_port, mode)
try:
if direct:
direct_start(host=listen_address, port=listen_port, debug=debug)
else:
gunicorn_start()
except Exception:
echo.console.print_exception()
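# Editor's sketch: hypothetical shell invocations of the command above; the
# "stats" executable name assumes a console-script entry point for this click
# group, which is not shown in this file.
#
#   $ stats start --listen-address ::1 --listen-port 8001
#   $ stats start --direct --debug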
@main.command()
def create_api_user():
"""Create an API User."""
# Standard Library
import secrets
# Project
from stats.auth.main import authdb_stop, create_user, authdb_start
async def _create(_user, _key):
await authdb_start()
await create_user(_user, _key)
await authdb_stop()
username = prompt("Username", type=str)
key = secrets.token_urlsafe(16)
loop = asyncio.new_event_loop()
loop.run_until_complete(_create(username, key))
echo(
"""Generated API User:
Username: {}
API Key: {}
""",
username,
key,
)
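# Editor's sketch: hypothetical interactive run of the command above (entry-point
# name assumed as before; depending on the installed click version the command may
# be exposed as create_api_user rather than create-api-user, and the key is whatever
# secrets.token_urlsafe(16) returns):
#
#   $ stats create-api-user
#   Username: alice
#   Generated API User:
#   Username: alice
#   API Key: <16-byte urlsafe token>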
@main.command()
@argument("username")
def delete_api_user(username):
"""Delete an API User."""
# Project
from stats.auth.main import authdb_stop, delete_user, authdb_start
async def _coro(_user):
await authdb_start()
await delete_user(_user)
await authdb_stop()
loop = asyncio.new_event_loop()
loop.run_until_complete(_coro(username))
echo("Deleted user {}", username)
@main.command()
@argument("route")
def create_api_route(route):
"""Create an API Route."""
# Project
from stats.auth.main import authdb_stop, authdb_start, create_route
async def _create(_route):
await authdb_start()
await create_route(_route)
await authdb_stop()
loop = asyncio.new_event_loop()
loop.run_until_complete(_create(route))
echo("Generated API Route {}", route)
@main.command()
@argument("route")
def delete_api_route(route):
"""Delete an API Route."""
# Project
from stats.auth.main import authdb_stop, authdb_start, delete_route
async def _coro(_route):
await authdb_start()
await delete_route(_route)
await authdb_stop()
loop = asyncio.new_event_loop()
loop.run_until_complete(_coro(route))
echo("Deleted route {}", route)
@main.command()
@argument("route")
@argument("username")
@option("--delete", is_flag=True, default=False, help="Delete the association")
def route_to_user(route, username, delete):
"""Associate a route with a username."""
# Project
from stats.auth.main import (
authdb_stop,
authdb_start,
associate_route,
disassociate_route,
)
try:
coro = associate_route
msg = "Associated Route {} with User {}"
if delete:
coro = disassociate_route
msg = "Disassociated Route {} with User {}"
async def _create(_route, _user):
await authdb_start()
await coro(_user, _route)
await authdb_stop()
loop = asyncio.new_event_loop()
loop.run_until_complete(_create(route, username))
echo(msg, route, username)
except Exception as err:
echo(str(err))
cli = CommandCollection(sources=[main])
| 25.05
| 80
| 0.636908
|
1632dbf8b4dea71adbf28701881d93a57bb7343b
| 11,237
|
py
|
Python
|
cogs/help.py
|
Brettanda/friday-bot
|
d103b1c103052b8d6d12f44456fea3823bd22447
|
[
"CC0-1.0"
] | 5
|
2021-09-04T09:08:55.000Z
|
2022-02-08T00:28:08.000Z
|
cogs/help.py
|
Brettanda/friday-bot
|
d103b1c103052b8d6d12f44456fea3823bd22447
|
[
"CC0-1.0"
] | 2
|
2021-10-11T21:44:08.000Z
|
2021-11-05T07:41:23.000Z
|
cogs/help.py
|
Brettanda/friday-bot
|
d103b1c103052b8d6d12f44456fea3823bd22447
|
[
"CC0-1.0"
] | 3
|
2021-09-04T09:12:34.000Z
|
2021-12-16T20:02:03.000Z
|
# import itertools
# import math
import discord
from discord import Embed
from discord.ext import commands
from discord.ext.menus import ListPageSource, ButtonMenuPages
# from discord.utils import get
# from interactions import Context as SlashContext
# import typing
from typing_extensions import TYPE_CHECKING
# from cogs.cleanup import get_delete_time
from functions import MessageColors, views, MyContext, embed
if TYPE_CHECKING:
from index import Friday as Bot
def get_examples(command: commands.Command, prefix: str = "!") -> list:
if command.extras != {} and "examples" in command.extras:
examples, x, ay, gy = [], 0, 0, 0
alias, aliases, group_aliases = None, [command.name, *command.aliases], [command.parent.name, *command.parent.aliases] if command.parent is not None else []
if "NoneType" in str(list(command.clean_params.items())[0][1]):
ay = divmod(x, len(aliases))
alias = aliases[x - (ay[0] * len(aliases))]
gy = divmod(x, len(group_aliases)) if command.parent is not None else 0
group = group_aliases[x - (gy[0] * len(group_aliases))] + " " if command.parent is not None else ""
x += 1
examples.append(f"{prefix}{group}{alias}")
for ex in command.extras["examples"]:
ay = divmod(x, len(aliases))
alias = aliases[x - (ay[0] * len(aliases))]
gy = divmod(x, len(group_aliases)) if command.parent is not None else 0
group = group_aliases[x - (gy[0] * len(group_aliases))] + " " if command.parent is not None else ""
examples.append(f"{prefix}{group}{alias} {ex}")
x += 1
return examples
return []
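# Editor's note, illustrative only (command and extras are made up): for a command
# named "ban" with aliases ["b"] and extras={"examples": ["@user spamming"]},
# get_examples(cmd, "!") yields strings like "!ban @user spamming", cycling through
# the name/aliases for successive examples.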
def get_params(com):
params = []
for key, value in com.params.items():
if key not in ("self", "ctx"):
if com.usage is not None:
# params.append(f"[{command.usage}]" if "NoneType" in str(value) else f"<{command.usage}>")
params = f"{com.usage}" if "NoneType" in str(value) else f"{com.usage}"
else:
post_key = "..." if "Greedy" in str(value) else ""
equals = str(value).split(' = ')[1] if len(str(value).split(' = ')) > 1 else str(None)
follow_key = f"={equals}" if equals != str(None) else ""
# params.append(f"[{key}{follow_key}]{post_key}" if "_Greedy" in str(value) or "NoneType" in str(value) else f"<{key}>")
params.append(f"[{key}{follow_key}]{post_key}" if "NoneType" in str(value) else f"<{key}>{post_key}")
if isinstance(params, list):
params = " ".join(params)
return params
def syntax(command, prefix: str = "!", quotes: bool = True):
cmd_and_aliases = "|".join([str(command), *command.aliases])
sub_commands = ""
if hasattr(command, "commands"):
for com in sorted(command.commands, key=lambda x: x.qualified_name):
if not com.hidden and com.enabled is not False:
sub_commands += f"\n{prefix}{cmd_and_aliases} {com.name} {get_params(com)}"
# sub_commands = "".join(str(command.commands) if hasattr(command,"commands") else "")
if quotes:
return f"```{prefix}{cmd_and_aliases} {get_params(command)}{sub_commands}```"
else:
return f"{prefix}{cmd_and_aliases} {get_params(command)}{sub_commands}"
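# Editor's note, illustrative only (hypothetical command): for a command "ping" with
# alias "p" and one required "member" parameter, syntax(cmd, "!") returns a code-fenced
# usage line along the lines of "```!ping|p <member>```", built from the prefix, the
# name|aliases string, the rendered parameters, and any sub-command lines.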
class MyMenuPages(ButtonMenuPages):
def __init__(self, source, **kwargs):
super().__init__(source=source, timeout=60.0, **kwargs)
self._source = source
self.current_page = 0
self.ctx = None
self.message = None
for item in views.Links().links:
self.add_item(item)
async def start(self, ctx, *, channel: discord.TextChannel = None, wait=False) -> None:
await self._source._prepare_once()
self.ctx = ctx
self.message = await self.send_initial_message(ctx, ctx.channel)
async def send_initial_message(self, ctx: "MyContext", channel: discord.TextChannel):
page = await self._source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
return await ctx.send(**kwargs)
async def _get_kwargs_from_page(self, page):
value = await super()._get_kwargs_from_page(page)
if "view" not in value:
value.update({"view": self})
return value
async def interaction_check(self, interaction: discord.Interaction) -> bool:
if interaction.user and interaction.user == self.ctx.author:
return True
else:
await interaction.response.send_message('This help menu is not for you.', ephemeral=True)
return False
def stop(self):
try:
self.ctx.bot.loop.create_task(self.message.delete())
except discord.NotFound:
pass
super().stop()
async def on_timeout(self) -> None:
self.stop()
class HelpMenu(ListPageSource):
def __init__(self, ctx, data, *, title="Commands", description=""):
self.ctx = ctx
self.title = title
self.description = description
super().__init__(data, per_page=6)
async def write_page(self, menu: MyMenuPages, fields: list = None):
if fields is None:
fields = []
offset = (menu.current_page * self.per_page) + 1
len_data = len(self.entries)
embed = Embed(
title=self.title,
description=self.description,
colour=MessageColors.DEFAULT
)
embed.set_thumbnail(url=self.ctx.bot.user.display_avatar.url)
embed.set_footer(text=f"{offset:,} - {min(len_data, offset+self.per_page-1):,} of {len_data:,} commands.")
for name, value in fields:
embed.add_field(name=name, value=value, inline=False)
return embed
async def format_page(self, menu: MyMenuPages, entries: "list[commands.Command]"):
fields = []
for entry in entries:
fields.append((entry.cog_name or "No description", syntax(entry, self.ctx.clean_prefix)))
return await self.write_page(menu, fields)
class Help(commands.HelpCommand):
def __init__(self):
super().__init__(command_attrs={"help": "Show help about the bot, a command, or a category.", "case_insensitive": True}, case_insensitive=True)
async def send_error_message(self, error):
return await self.context.reply(embed=embed(title=str(error), color=MessageColors.ERROR))
def get_command_signature(self, command: commands.Command) -> str:
return '\n'.join(syntax(command, self.context.clean_prefix, quotes=False).split('\n'))
def make_page_embed(self, commands, title="Friday - Help", description="If you would like to make a suggestion for a command please join the [Friday's Development](https://discord.gg/NTRuFjU) and explain your suggestion.\n\nFor more info on how commands work and how to format them please check out [docs.friday-bot.com](https://docs.friday-bot.com/).\n\n**Some commands will only show if you have the correct permissions to use them.**"):
embed = Embed(color=MessageColors.DEFAULT)
embed.title = title
embed.description = description
# embed.set_footer()
if len(commands) == 0:
embed.add_field(
name="Commands",
value="No commands that you can use",
inline=False
)
for command in commands:
signature = (
self.get_command_signature(command)
)
embed.add_field(
name=signature,
value=command.help or "No help found...",
inline=False,
)
return embed
def make_default_embed(self, cogs: "list[commands.Cog]", title="Friday - Help", description=discord.Embed.Empty):
embed = Embed(color=MessageColors.DEFAULT)
embed.title = title
embed.description = description
x = 0
for cog in cogs:
cog, description, command_list = cog
description = f"{description or 'No description'} \n {''.join([f'`{command.qualified_name}` ' for command in command_list])}"
embed.add_field(name=cog.qualified_name, value=description, inline=False)
x += 1
return embed
async def command_callback(self, ctx: "MyContext", *, command=None):
self.context = ctx
return await super().command_callback(ctx, command=command)
async def send_bot_help(self, mapping):
ctx = self.context
ctx.invoked_with = "help"
bot: "Bot" = ctx.bot
commands = []
for com in bot.commands:
try:
if await com.can_run(ctx) and com.hidden is not True and com.enabled is not False:
commands.append(com)
except Exception:
pass
menu = MyMenuPages(
source=HelpMenu(ctx, commands, title="Friday - Help", description="If you would like to make a suggestion for a command please join the [Friday's Development](https://discord.gg/NTRuFjU) and explain your suggestion.\n\nFor more info on how commands work and how to format them please check out [docs.friday-bot.com](https://docs.friday-bot.com/).\n\n**Some commands will only show if you have the correct permissions to use them.**",)
)
await menu.start(ctx)
async def send_cog_help(self, cog):
ctx = self.context
ctx.invoked_with = "help"
# bot: "Bot" = ctx.bot
filtered = await self.filter_commands(cog.get_commands(), sort=True)
embed = self.make_page_embed(
filtered,
title=(cog and cog.qualified_name or "Other") + " Commands",
description=discord.Embed.Empty if cog is None else cog.description
)
await ctx.reply(embed=embed)
async def send_group_help(self, group):
ctx = self.context
ctx.invoked_with = "help"
# bot: "Bot" = ctx.bot
subcommands = group.commands
if len(subcommands) == 0:
return await self.send_command_help(group)
filtered = await self.filter_commands(subcommands, sort=True)
embed = self.make_page_embed(
filtered,
title=self.context.clean_prefix + group.qualified_name,
description=f"{group.description}\n\n{group.help}"
if group.description
else group.help or "No help found..."
)
if group.extras != {}:
if "examples" in group.extras:
embed.add_field(name="Examples", value="```py\n" + "\n".join(get_examples(group, self.context.clean_prefix)) + "```", inline=False)
if "params" in group.extras:
embed.add_field(name="Available Parameters", value="```py\n" + ", ".join(group.extras['params']) + "```", inline=False)
embed.add_field(name="Signature", value="```py\n" + self.get_command_signature(group) + "```", inline=False)
await ctx.reply(embed=embed)
async def send_command_help(self, command: commands.Command):
embed = Embed(color=MessageColors.DEFAULT)
embed.title = self.context.clean_prefix + command.qualified_name
if command.description:
embed.description = f"{command.description}\n\n{command.help}"
else:
embed.description = command.help or "No help found..."
if command.extras != {}:
if "examples" in command.extras:
embed.add_field(name="Examples", value="```py\n" + "\n".join(get_examples(command, self.context.clean_prefix)) + "```", inline=False)
if "params" in command.extras:
embed.add_field(name="Available Parameters", value="```py\n" + ", ".join(command.extras['params']) + "```", inline=False)
embed.add_field(name="Signature", value="```py\n" + self.get_command_signature(command) + "```", inline=False)
await self.context.reply(embed=embed)
def setup(bot: "Bot"):
bot.old_help_command = bot.help_command
bot.help_command = Help()
def teardown(bot: "Bot"):
bot.help_command = bot.old_help_command
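# Illustrative sketch only (not part of the original file): how an extension
# module exposing setup()/teardown() like the ones above is typically loaded
# with discord.py 1.x. The module path "cogs.help" and the command prefix are
# assumptions for demonstration purposes.
def _example_load_help_extension():
  from discord.ext import commands as dpy_commands  # local import keeps the sketch self-contained
  bot = dpy_commands.Bot(command_prefix="!")
  bot.load_extension("cogs.help")  # runs setup(), installing the custom Help command above
  return bot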
| 37.835017
| 442
| 0.671799
|
0d0f53562b223ec1c7396ecc313c7cc59b5a2c19
| 232,161
|
py
|
Python
|
skimage/external/tifffile/tifffile.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | 1
|
2022-01-29T23:04:13.000Z
|
2022-01-29T23:04:13.000Z
|
skimage/external/tifffile/tifffile.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/external/tifffile/tifffile.py
|
thewtex/scikit-image
|
22bb6b94698b8889cbdf26b25d9e4fdb8b968d97
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2017, Christoph Gohlke
# Copyright (c) 2008-2017, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read image and meta data from (bio)TIFF files. Save numpy arrays as TIFF.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data, chroma subsampling,
or EXIF, IPTC, GPS, and XMP metadata is not implemented. Only primary info
records are read for STK, FluoView, MicroManager, and NIH Image formats.
TIFF, the Tagged Image File Format aka Thousands of Incompatible File Formats,
is under the control of Adobe Systems. BigTIFF allows for files greater than
4 GB. STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions
defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss
MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics,
Molecular Dynamics, and the Open Microscopy Environment consortium
respectively.
For command line usage run `python tifffile.py --help`
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2017.01.12
Requirements
------------
* `CPython 2.7 or 3.5 <http://www.python.org>`_ (64-bit recommended)
* `Numpy 1.11 <http://www.numpy.org>`_
* `Matplotlib 1.5 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2017.01.10 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Revisions
---------
2017.01.12
Read Zeiss SEM metadata.
Read OME-TIFF with invalid references to external files.
Rewrite C LZW decoder (5x faster).
Read corrupted LSM files missing EOI code in LZW stream.
2017.01.01
Add option to append images to existing TIFF files.
Read files without pages.
Read S-FEG and Helios NanoLab tags created by FEI software.
Allow saving Color Filter Array (CFA) images.
Add info functions returning more information about TiffFile and TiffPage.
Add option to read specific pages only.
Remove maxpages argument (backwards incompatible).
Remove test_tifffile function.
2016.10.28
Pass 1944 tests.
Improve detection of ImageJ hyperstacks.
Read TVIPS metadata created by EM-MENU (by Marco Oster).
Add option to disable using OME-XML metadata.
Allow non-integer range attributes in modulo tags (by Stuart Berg).
2016.06.21
Do not always memmap contiguous data in page series.
2016.05.13
Add option to specify resolution unit.
Write grayscale images with extra samples when planarconfig is specified.
Do not write RGB color images with 2 samples.
Reorder TiffWriter.save keyword arguments (backwards incompatible).
2016.04.18
Pass 1932 tests.
TiffWriter, imread, and imsave accept open binary file streams.
2016.04.13
Correctly handle reversed fill order in 2 and 4 bps images (bug fix).
Implement reverse_bitorder in C.
2016.03.18
Fix saving additional ImageJ metadata.
2016.02.22
Pass 1920 tests.
Write 8 bytes double tag values using offset if necessary (bug fix).
Add option to disable writing second image description tag.
Detect tags with incorrect counts.
Disable color mapping for LSM.
2015.11.13
Read LSM 6 mosaics.
Add option to specify directory of memory-mapped files.
Add command line options to specify vmin and vmax values for colormapping.
2015.10.06
New helper function to apply colormaps.
Renamed is_palette attributes to is_indexed (backwards incompatible).
Color-mapped samples are now contiguous (backwards incompatible).
Do not color-map ImageJ hyperstacks (backwards incompatible).
Towards supporting Leica SCN.
2015.09.25
Read images with reversed bit order (fill_order is lsb2msb).
2015.09.21
Read RGB OME-TIFF.
Warn about malformed OME-XML.
2015.09.16
Detect some corrupted ImageJ metadata.
Better axes labels for 'shaped' files.
Do not create TiffTags for default values.
Chroma subsampling is not supported.
Memory-map data in TiffPageSeries if possible (optional).
2015.08.17
Pass 1906 tests.
Write ImageJ hyperstacks (optional).
Read and write LZMA compressed data.
Specify datetime when saving (optional).
Save tiled and color-mapped images (optional).
Ignore void byte_counts and offsets if possible.
Ignore bogus image_depth tag created by ISS Vista software.
Decode floating point horizontal differencing (not tiled).
Save image data contiguously if possible.
Only read first IFD from ImageJ files if possible.
Read ImageJ 'raw' format (files larger than 4 GB).
TiffPageSeries class for pages with compatible shape and data type.
Try to read incomplete tiles.
Open file dialog if no filename is passed on command line.
Ignore errors when decoding OME-XML.
Rename decoder functions (backwards incompatible)
2014.08.24
TiffWriter class for incremental writing images.
Simplify examples.
2014.08.19
Add memmap function to FileHandle.
Add function to determine if image data in TiffPage is memory-mappable.
Do not close files if multifile_close parameter is False.
2014.08.10
Pass 1730 tests.
Return all extrasamples by default (backwards incompatible).
Read data from series of pages into memory-mapped array (optional).
Squeeze OME dimensions (backwards incompatible).
Workaround missing EOI code in strips.
Support image and tile depth tags (SGI extension).
Better handling of STK/UIC tags (backwards incompatible).
Disable color mapping for STK.
Julian to datetime converter.
TIFF ASCII type may be NULL separated.
Unwrap strip offsets for LSM files greater than 4 GB.
Correct strip byte counts in compressed LSM files.
Skip missing files in OME series.
Read embedded TIFF files.
2014.02.05
Save rational numbers as type 5 (bug fix).
2013.12.20
Keep other files in OME multi-file series closed.
FileHandle class to abstract binary file handle.
Disable color mapping for bad OME-TIFF produced by bio-formats.
Read bad OME-XML produced by ImageJ when cropping.
2013.11.03
Allow zlib compress data in imsave function (optional).
Memory-map contiguous image data (optional).
2013.10.28
Read MicroManager metadata and little endian ImageJ tag.
Save extra tags in imsave function.
Save tags in ascending order by code (bug fix).
2012.10.18
Accept file like objects (read from OIB files).
2012.08.21
Rename TIFFfile to TiffFile and TIFFpage to TiffPage.
TiffSequence class for reading sequence of TIFF files.
Read UltraQuant tags.
Allow float numbers as resolution in imsave function.
2012.08.03
Read MD GEL tags and NIH Image header.
2012.07.25
Read ImageJ tags.
...
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `python-bioformats <https://github.com/CellProfiler/python-bioformats>`_
* `Imread <https://github.com/luispedro/imread>`_
* `PyLibTiff <https://github.com/pearu/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <https://github.com/vasole/pymca>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `pymimage <https://github.com/ardoi/pymimage>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
* Christian Kliche for help writing tiled and color-mapped files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import lzma
except ImportError:
try:
import backports.lzma as lzma
except ImportError:
lzma = None
try:
if __package__:
from . import _tifffile
else:
import _tifffile
except ImportError:
warnings.warn(
"ImportError: No module named '_tifffile'. "
"Loading of some compressed images will be very slow. "
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2017.01.12'
__docformat__ = 'restructuredtext en'
__all__ = (
'imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter', 'TiffSequence',
# utility functions used in oiffile and czifile
'FileHandle', 'lazyattr', 'natural_sorted', 'decode_lzw', 'stripnull')
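# Illustrative sketch (not part of the original module): the import fallback
# above leaves the name '_tifffile' unbound when the C extension is missing,
# so callers can branch on its availability. The helper name is hypothetical.
def _have_accelerated_decoders():
    """Return True if the optional tifffile.c extension was imported."""
    return '_tifffile' in globals()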
def imsave(file, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
file : str or binary stream
        File name or writable binary stream, such as an open file or BytesIO.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'append', 'byteorder', 'bigtiff', 'software', and 'imagej',
are passed to the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution', 'compress',
'colormap', 'tile', 'description', 'datetime', 'metadata', 'contiguous'
and 'extratags' are passed to the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> imsave('temp.tif', data, compress=6, metadata={'axes': 'TZCYX'})
"""
tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder',
'software', 'imagej')
if 'bigtiff' not in tifargs and 'imagej' not in tifargs and (
data.size*data.dtype.itemsize > 2000*2**20):
tifargs['bigtiff'] = True
with TiffWriter(file, **tifargs) as tif:
tif.save(data, **kwargs)
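# Illustrative sketch (assumption, not part of the original module): imsave
# splits keyword arguments between TiffWriter ('append', ...) and
# TiffWriter.save ('compress', 'metadata', ...), so a second call with
# append=True adds pages to an existing file. The file name is a placeholder.
def _imsave_append_example():
    data = numpy.random.rand(5, 301, 219)
    imsave('temp_append.tif', data, compress=6, metadata={'axes': 'ZYX'})
    imsave('temp_append.tif', data, append=True, compress=6)  # appends 5 more pages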
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the 'close' method, which is
automatically called when using the 'with' context manager.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, '2i': 10, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'document_name': 269,
'image_description': 270, 'strip_offsets': 273, 'orientation': 274,
'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'smin_sample_value': 340, 'smax_sample_value': 341,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, file, append=False, bigtiff=False, byteorder=None,
software='tifffile.py', imagej=False):
"""Open a TIFF file for writing.
Existing files are overwritten by default.
Use bigtiff=True when creating files larger than 2 GB.
Parameters
----------
file : str, binary stream, or FileHandle
            File name or writable binary stream, such as an open file
or BytesIO.
The file is created if it does not exist.
append : bool
If True and 'file' is an existing standard TIFF file, image data
and tags are appended to the file.
Appending data may corrupt specifically formatted TIFF files
such as LSM, STK, ImageJ, NIH, or FluoView.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the file.
Saved with the first page in the file only.
imagej : bool
If True, write an ImageJ hyperstack compatible file.
This format can handle data types uint8, uint16, or float32 and
data shapes up to 6 dimensions in TZCYXS order.
RGB images (S=3 or S=4) must be uint8.
ImageJ's default byte order is big endian but this implementation
uses the system's native byte order by default.
ImageJ does not support BigTIFF format or LZMA compression.
The ImageJ file format is undocumented.
"""
if append:
# determine if file is an existing TIFF file that can be extended
try:
with FileHandle(file, mode='rb', size=0) as fh:
pos = fh.tell()
try:
with TiffFile(fh, pages=[0]) as tif:
if (append != 'force' and
any(getattr(tif, 'is_'+a) for a in
('lsm', 'stk', 'imagej', 'nih', 'fluoview',
'micromanager'))):
raise ValueError("contains metadata")
byteorder = tif.byteorder
bigtiff = tif.is_bigtiff
imagej = tif.is_imagej
self._ifd_offset = tif._ifd_offset
if tif.pages:
software = None
except Exception as e:
raise ValueError("can not append to file: %s" % str(e))
finally:
fh.seek(pos)
except (IOError, FileNotFoundError):
append = False
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
if imagej and bigtiff:
warnings.warn("writing incompatible bigtiff ImageJ")
self._byteorder = byteorder
self._software = software
self._imagej = bool(imagej)
self._metadata = None
self._colormap = None
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
self._tags = None
self._shape = None # normalized shape of data in consecutive pages
self._data_shape = None # shape of data in consecutive pages
self._data_dtype = None # data type
self._data_offset = None # offset to data
self._data_byte_counts = None # byte counts per plane
self._tag_offsets = None # strip or tile offset tag code
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._value_format = '8s'
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._value_format = '4s'
if append:
self._fh = FileHandle(file, mode='r+b', size=0)
self._fh.seek(0, 2)
else:
self._fh = FileHandle(file, mode='wb', size=0)
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, tile=None,
contiguous=True, compress=0, colormap=None,
description=None, datetime=None, resolution=None,
metadata={}, extratags=()):
"""Write image data and tags to TIFF file.
Image data are written in one stripe per plane by default.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' tags are derived from
the data type.
Parameters
----------
data : numpy.ndarray
Input image. The last dimensions are assumed to be image depth,
height (length), width, and samples.
If a colormap is provided, the dtype must be uint8 or uint16 and
the data values are indices into the last dimension of the
colormap.
photometric : {'minisblack', 'miniswhite', 'rgb', 'palette', 'cfa'}
The color space of the image data.
By default this setting is inferred from the data shape and the
value of colormap.
For CFA images, DNG tags must be specified in extratags.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
If this parameter is set, extra samples are used to store grayscale
images.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
tile : tuple of int
The shape (depth, length, width) of image tiles to write.
If None (default), image data are written in one stripe per plane.
The tile length and width must be a multiple of 16.
If the tile depth is provided, the SGI image_depth and tile_depth
            tags are used to save volume data. Few programs can read the
            SGI format, e.g. MeVisLab.
contiguous : bool
If True (default) and the data and parameters are compatible with
previous ones, if any, the data are stored contiguously after
the previous one. Parameters 'photometric' and 'planarconfig' are
ignored.
compress : int or 'lzma'
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
Compression cannot be used to write contiguous files.
If 'lzma', LZMA compression is used, which is not available on
all platforms.
colormap : numpy.ndarray
RGB color values for the corresponding data value.
Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16.
description : str
The subject of the image. Saved with the first page only.
Cannot be used with the ImageJ format.
datetime : datetime
Date and time of image creation. Saved with the first page only.
If None (default), the current date and time is used.
resolution : (float, float[, str]) or ((int, int), (int, int)[, str])
X and Y resolutions in pixels per resolution unit as float or
rational numbers.
A third, optional parameter specifies the resolution unit,
which must be None (default for ImageJ), 'inch' (default), or 'cm'.
metadata : dict
Additional meta data to be saved along with shape information
in JSON or ImageJ formats in an image_description tag.
If None, do not write a second image_description tag.
extratags : sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
# TODO: refactor this function
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
value_format = self._value_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
if data.size == 0:
raise ValueError("can not save empty array")
# just append contiguous data if possible
if self._data_shape:
if (not contiguous or
self._data_shape[1:] != data.shape or
self._data_dtype != data.dtype or
(compress and self._tags) or
tile or
not numpy.array_equal(colormap, self._colormap)):
# incompatible shape, dtype, compression mode, or colormap
self._write_remaining_pages()
self._write_image_description()
self._description_offset = 0
self._description_len_offset = 0
self._data_shape = None
self._colormap = None
if self._imagej:
raise ValueError(
"ImageJ does not support non-contiguous data")
else:
# consecutive mode
self._data_shape = (self._data_shape[0] + 1,) + data.shape
if not compress:
# write contiguous data, write ifds/tags later
fh.write_array(data)
return
if photometric not in (None, 'minisblack', 'miniswhite',
'rgb', 'palette', 'cfa'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
# prepare compression
if not compress:
compress = False
compress_tag = 1
elif compress == 'lzma':
compress = lzma.compress
compress_tag = 34925
if self._imagej:
raise ValueError("ImageJ can not handle LZMA compression")
elif not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
elif compress:
def compress(data, level=compress):
return zlib.compress(data, level)
compress_tag = 32946
# prepare ImageJ format
if self._imagej:
if description:
warnings.warn("not writing description to ImageJ file")
description = None
volume = False
if data.dtype.char not in 'BHhf':
raise ValueError("ImageJ does not support data type '%s'"
% data.dtype.char)
ijrgb = photometric == 'rgb' if photometric else None
if data.dtype.char not in 'B':
ijrgb = False
ijshape = imagej_shape(data.shape, ijrgb)
if ijshape[-1] in (3, 4):
photometric = 'rgb'
if data.dtype.char not in 'B':
raise ValueError("ImageJ does not support data type '%s' "
"for RGB" % data.dtype.char)
elif photometric is None:
photometric = 'minisblack'
planarconfig = None
if planarconfig == 'planar':
raise ValueError("ImageJ does not support planar images")
else:
planarconfig = 'contig' if ijrgb else None
# verify colormap and indices
if colormap is not None:
if data.dtype.char not in 'BH':
raise ValueError("invalid data dtype for palette mode")
colormap = numpy.asarray(colormap, dtype=byteorder+'H')
if colormap.shape != (3, 2**(data.itemsize * 8)):
raise ValueError("invalid color map shape")
self._colormap = colormap
# verify tile shape
if tile:
tile = tuple(int(i) for i in tile[:3])
volume = len(tile) == 3
if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or
any(i < 1 for i in tile)):
raise ValueError("invalid tile shape")
else:
tile = ()
volume = False
# normalize data shape to 5D or 6D, depending on volume:
# (pages, planar_samples, [depth,] height, width, contig_samples)
data_shape = data.shape
if photometric == 'rgb':
data = reshape_nd(data, 3)
else:
data = reshape_nd(data, 2)
shape = data.shape
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if colormap is not None:
photometric = 'palette'
planarconfig = None
if photometric is None:
photometric = 'minisblack'
if planarconfig == 'contig':
if data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif planarconfig == 'planar':
if volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif self._imagej:
photometric = 'minisblack'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif photometric == 'cfa':
if len(shape) != 2:
raise ValueError("invalid CFA image")
volume = False
planarconfig = None
data = data.reshape((-1, 1) + shape[-2:] + (1,))
if 50706 not in (et[0] for et in extratags):
raise ValueError("must specify DNG tags for CFA image")
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
# normalize shape to 6D
assert len(data.shape) in (5, 6)
if len(data.shape) == 5:
data = data.reshape(data.shape[:2] + (1,) + data.shape[2:])
shape = data.shape
if tile and not volume:
tile = (1, tile[-2], tile[-1])
if photometric == 'palette':
if (samplesperpixel != 1 or extrasamples or
shape[1] != 1 or shape[-1] != 1):
raise ValueError("invalid data shape for palette mode")
if photometric == 'rgb' and samplesperpixel == 2:
raise ValueError("not a RGB image (samplesperpixel=2)")
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
strip_or_tile = 'tile' if tile else 'strip'
tag_byte_counts = TiffWriter.TAGS[strip_or_tile + '_byte_counts']
tag_offsets = TiffWriter.TAGS[strip_or_tile + '_offsets']
self._tag_offsets = tag_offsets
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value
# Append (code, ifdentry, ifdvalue, writeonce) to tags list
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
rawcount = value.find(b'\0\0')
if rawcount < 0:
rawcount = count
else:
rawcount += 1 # length of string without buffer
value = (value,)
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if struct.calcsize(dtype) * count <= offset_size:
# value(s) can be written directly
if count == 1:
if isinstance(value, (tuple, list, numpy.ndarray)):
value = value[0]
ifdentry.append(pack(value_format, pack(dtype, value)))
else:
ifdentry.append(pack(value_format,
pack(str(count)+dtype, *value)))
else:
# use offset to value(s)
ifdentry.append(pack(offset_format, 0))
if isinstance(value, numpy.ndarray):
assert value.size == count
assert value.dtype.char == dtype
ifdvalue = value.tostring()
elif isinstance(value, (tuple, list)):
ifdvalue = pack(str(count)+dtype, *value)
else:
ifdvalue = pack(dtype, value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
            # return numerator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if description:
# user provided description
addtag('image_description', 's', 0, description, writeonce=True)
# write shape and metadata to image_description
self._metadata = {} if not metadata else metadata
if self._imagej:
description = imagej_description(
data_shape, shape[-1] in (3, 4), self._colormap is not None,
**self._metadata)
elif metadata or metadata == {}:
description = image_description(
data_shape, self._colormap is not None, **self._metadata)
else:
description = None
if description:
# add 32 bytes buffer
# the image description might be updated later with the final shape
description += b'\0'*32
self._description_len = len(description)
addtag('image_description', 's', 0, description, writeonce=True)
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page in file
if datetime is None:
datetime = self._now()
addtag('datetime', 's', 0, datetime.strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, compress_tag)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if tile:
addtag('tile_width', 'I', 1, tile[-1])
addtag('tile_length', 'I', 1, tile[-2])
if tile[0] > 1:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, tile[0])
addtag('new_subfile_type', 'I', 1, 0)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1, {'miniswhite': 0, 'minisblack': 1,
'rgb': 2, 'palette': 3,
'cfa': 32803}[photometric])
if colormap is not None:
addtag('color_map', 'H', colormap.size, colormap)
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8,) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
if len(resolution) > 2:
resolution_unit = {None: 1, 'inch': 2, 'cm': 3}[resolution[2]]
elif self._imagej:
resolution_unit = 1
else:
resolution_unit = 2
addtag('resolution_unit', 'H', 1, resolution_unit)
if not tile:
addtag('rows_per_strip', 'I', 1, shape[-3]) # * shape[-4]
if tile:
# use one chunk per tile per plane
tiles = ((shape[2] + tile[0] - 1) // tile[0],
(shape[3] + tile[1] - 1) // tile[1],
(shape[4] + tile[2] - 1) // tile[2])
numtiles = product(tiles) * shape[1]
strip_byte_counts = [
product(tile) * shape[-1] * data.dtype.itemsize] * numtiles
addtag(tag_byte_counts, offset_format, numtiles, strip_byte_counts)
addtag(tag_offsets, offset_format, numtiles, [0] * numtiles)
# allocate tile buffer
chunk = numpy.empty(tile + (shape[-1],), dtype=data.dtype)
else:
# use one strip per plane
strip_byte_counts = [
data[0, 0].size * data.dtype.itemsize] * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], [0] * shape[1])
# add extra tags from user
for t in extratags:
addtag(*t)
# TODO: check TIFFReadDirectoryCheckOrder warning in files containing
# multiple tags of same code
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not (self._bigtiff or self._imagej) and (
fh.tell() + data.size*data.dtype.itemsize > 2**31-1):
raise ValueError("data too large for standard TIFF file")
# if not compressed or tiled, write the first ifd and then all data
# contiguously; else, write all ifds and data interleaved
for pageindex in range(shape[0] if (compress or tile) else 1):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
elif tag[0] == 270 and tag[2].endswith(b'\0\0\0\0'):
# image description buffer
self._description_offset = pos
self._description_len_offset = (
tag_offset + tagindex * tag_size + 4)
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
if tile:
for plane in data[pageindex]:
for tz in range(tiles[0]):
for ty in range(tiles[1]):
for tx in range(tiles[2]):
c0 = min(tile[0], shape[2] - tz*tile[0])
c1 = min(tile[1], shape[3] - ty*tile[1])
c2 = min(tile[2], shape[4] - tx*tile[2])
chunk[c0:, c1:, c2:] = 0
chunk[:c0, :c1, :c2] = plane[
tz*tile[0]:tz*tile[0]+c0,
ty*tile[1]:ty*tile[1]+c1,
tx*tile[2]:tx*tile[2]+c2]
if compress:
t = compress(chunk)
strip_byte_counts.append(len(t))
fh.write(t)
else:
fh.write_array(chunk)
fh.flush()
elif compress:
for plane in data[pageindex]:
plane = compress(plane)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
fh.write_array(data)
# update strip/tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip/tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [tag for tag in tags if not tag[-1]]
# if uncompressed, write remaining ifds/tags later
if not (compress or tile):
self._tags = tags
self._shape = shape
self._data_shape = (1,) + data_shape
self._data_dtype = data.dtype
self._data_offset = data_offset
self._data_byte_counts = strip_byte_counts
def _write_remaining_pages(self):
"""Write outstanding IFDs and tags to file."""
if not self._tags:
return
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data_offset = self._data_offset
page_data_size = sum(self._data_byte_counts)
tag_bytes = b''.join(t[1] for t in self._tags)
numpages = self._shape[0] * self._data_shape[0] - 1
pos = fh.tell()
if not self._bigtiff and pos + len(tag_bytes) * numpages > 2**32 - 256:
if self._imagej:
warnings.warn("truncating ImageJ file")
return
raise ValueError("data too large for non-bigtiff file")
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
for _ in range(numpages):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifd entries
fh.write(pack(numtag_format, len(self._tags)))
tag_offset = fh.tell()
fh.write(tag_bytes)
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# offset to image data
data_offset += page_data_size
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(self._tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == self._tag_offsets:
strip_offsets_offset = pos
fh.write(tag[2])
# update strip/tile offsets if necessary
pos = fh.tell()
for tagindex, tag in enumerate(self._tags):
if tag[0] == self._tag_offsets: # strip/tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in self._data_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
break
fh.seek(pos)
self._tags = None
self._data_dtype = None
self._data_offset = None
self._data_byte_counts = None
# do not reset _shape or _data_shape
def _write_image_description(self):
"""Write meta data to image_description tag."""
if (not self._data_shape or self._data_shape[0] == 1 or
self._description_offset <= 0):
return
colormapped = self._colormap is not None
if self._imagej:
isrgb = self._shape[-1] in (3, 4)
description = imagej_description(
self._data_shape, isrgb, colormapped, **self._metadata)
else:
description = image_description(
self._data_shape, colormapped, **self._metadata)
# rewrite description and its length to file
description = description[:self._description_len-1]
pos = self._fh.tell()
self._fh.seek(self._description_offset)
self._fh.write(description)
self._fh.seek(self._description_len_offset)
self._fh.write(struct.pack(self._byteorder+self._offset_format,
len(description)+1))
self._fh.seek(pos)
self._description_offset = 0
self._description_len_offset = 0
self._description_len = 0
def _now(self):
"""Return current date and time."""
return datetime.datetime.now()
def close(self, truncate=False):
"""Write remaining pages (if not truncate) and close file handle."""
if not truncate:
self._write_remaining_pages()
self._write_image_description()
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
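# Illustrative sketch (assumption, not part of the original module): writing a
# page series as 256x256 tiles with an explicit resolution, as described in the
# TiffWriter.save docstring above. The file name and resolution values are
# placeholders; tile edge lengths must be multiples of 16.
def _tiffwriter_tile_example():
    data = numpy.random.rand(5, 301, 219).astype('float32')
    with TiffWriter('temp_tiles.tif') as tif:
        tif.save(data, tile=(256, 256), resolution=(300.0, 300.0, 'inch'))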
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
Refer to the TiffFile class and member functions for documentation.
Parameters
----------
files : str, binary stream, or sequence
File name, seekable binary stream, glob pattern, or sequence of
file names.
kwargs : dict
Parameters 'multifile', 'multifile_close', 'pages', 'fastij', and
'is_ome' are passed to the TiffFile class.
The 'pattern' parameter is passed to the TiffSequence class.
Other parameters are passed to the asarray functions.
The first image series is returned if no arguments are provided.
Examples
--------
>>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219))
>>> im = imread('temp.tif', key=0)
>>> im.shape
(4, 301, 219)
>>> ims = imread(['temp.tif', 'temp.tif'])
>>> ims.shape
(2, 3, 4, 301, 219)
"""
kwargs_file = parse_kwargs(kwargs, 'multifile', 'multifile_close',
'pages', 'fastij', 'is_ome')
kwargs_seq = parse_kwargs(kwargs, 'pattern')
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if not hasattr(files, 'seek') and len(files) == 1:
files = files[0]
if isinstance(files, basestring) or hasattr(files, 'seek'):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
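# Illustrative sketch (assumption, not part of the original module): the
# 'pages' argument is passed through to TiffFile while 'key' selects pages in
# asarray; TiffFile can also be used directly to pick a series. The file name
# is a placeholder.
def _imread_pages_example():
    imsave('temp_read.tif', numpy.random.rand(5, 301, 219))
    first_page = imread('temp_read.tif', pages=[0], key=0)  # read first page only
    with TiffFile('temp_read.tif') as tif:
        series0 = tif.asarray(series=0)
    return first_page.shape, series0.shape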
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func',)
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
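# Illustrative sketch (not part of the original module): because lazyattr is a
# non-data descriptor, the computed value is stored on the instance after the
# first access and the decorated function is never called again. The class
# name is hypothetical.
class _LazyattrExample(object):
    @lazyattr
    def answer(self):
        # runs once; afterwards instance.__dict__['answer'] shadows the descriptor
        return sum(range(10))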
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the 'close' method, which is
automatically called when using the 'with' context manager.
Attributes
----------
pages : list of TiffPage
All TIFF pages in file.
series : list of TiffPageSeries
TIFF pages with compatible shapes and types.
    micromanager_metadata : dict
        Extra MicroManager non-TIFF metadata in the file, if it exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('temp.tif') as tif:
... data = tif.asarray()
... data.shape
(5, 301, 219)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True, pages=None,
fastij=True, is_ome=None):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
pages : sequence of int
Indices of the pages to read. If None (default) all pages are read.
Can be used to read only the first page with pages=[0].
Specifying pages might invalidate series based on metadata.
fastij : bool
If True (default), try to use only the metadata from the first page
of ImageJ files. Significantly speeds up loading movies with
thousands of pages.
is_ome : bool
If False, disable processing of OME-XML metadata.
"""
if is_ome is False:
self.is_ome = False
self._fh = FileHandle(arg, mode='rb',
name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
self._ifd_offset = 0 # offset to offset of next IFD
try:
self._fromfile(pages, fastij)
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self, pages=None, fastij=True):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("invalid TIFF file")
self._is_native = self.byteorder == {'big': '>',
'little': '<'}[sys.byteorder]
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43:
# BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("invalid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self._ifd_offset = self._fh.tell()
self.pages = []
pageindex = -1
while True:
pageindex += 1
skip = pages and pageindex not in pages
try:
page = TiffPage(self, skip)
except StopIteration:
break
if skip:
continue
self.pages.append(page)
if fastij:
if page._patch_imagej():
break # only read the first page of ImageJ files
fastij = False
# TiffPage() leaves the file cursor at offset to offset of next IFD
self._ifd_offset = self._fh.tell()
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
# each series and position require separate unwrapping (undocumented)
for series in self.series:
positions = 1
for i in 0, 1:
if series.axes[i] in 'PM':
positions *= series.shape[i]
positions = len(series.pages) // positions
for i, page in enumerate(series.pages):
if not i % positions:
wrap = 0
previous_offset = 0
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
def asarray(self, key=None, series=None, memmap=False, tempdir=None):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int or TiffPageSeries
Defines which series of pages to return as array.
memmap : bool
            If True, return a read-only array memory-mapped from a binary file
            on disk if possible. The TIFF file itself is used if possible;
            otherwise a temporary file is created.
tempdir : str
The directory where the memory-mapped file will be created.
"""
if not self.pages:
return numpy.array([])
if key is None and series is None:
series = 0
if series is not None:
try:
series = self.series[series]
except (KeyError, TypeError):
pass
pages = series.pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_indexed:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = apply_colormap(result, pages[0].color_map)
else:
result = stack_pages(pages, memmap=memmap, tempdir=tempdir,
colormapped=False, squeeze=False)
elif len(pages) == 1:
result = pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_indexed, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, series.dtype, shape=series.shape)
result = result.reshape(-1)
else:
result = numpy.empty(series.shape, series.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
elif key is None and series and series.offset:
if memmap:
result = self.filehandle.memmap_array(
series.dtype, series.shape, series.offset)
else:
self.filehandle.seek(series.offset)
result = self.filehandle.read_array(
series.dtype, product(series.shape))
else:
result = stack_pages(pages, memmap=memmap, tempdir=tempdir)
if key is None:
try:
result.shape = series.shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, series.shape))
# try series of expected shapes
result.shape = (-1,) + series.shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
elif len(pages) == 1:
result.shape = pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
@lazyattr
def series(self):
"""Return pages with compatible properties as TiffPageSeries."""
if not self.pages:
return []
series = []
if self.is_ome:
series = self._ome_series()
elif self.is_fluoview:
series = self._fluoview_series()
elif self.is_lsm:
series = self._lsm_series()
elif self.is_imagej:
series = self._imagej_series()
elif self.is_nih:
series = self._nih_series()
if not series:
# generic detection of series
shapes = []
pages = {}
index = 0
for page in self.pages:
if not page.shape:
continue
if page.is_shaped:
index += 1 # shape starts a new series
shape = page.shape + (index, page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape in pages:
pages[shape].append(page)
else:
shapes.append(shape)
pages[shape] = [page]
series = []
for s in shapes:
shape = ((len(pages[s]),) + s[:-3] if len(pages[s]) > 1
else s[:-3])
axes = (('I' + s[-2]) if len(pages[s]) > 1 else s[-2])
page0 = pages[s][0]
if page0.is_shaped:
metadata = image_description_dict(page0.is_shaped)
reshape = metadata['shape']
if 'axes' in metadata:
reaxes = metadata['axes']
if len(reaxes) == len(reshape):
axes = reaxes
shape = reshape
else:
warnings.warn("axes do not match shape")
try:
axes = reshape_axes(axes, shape, reshape)
shape = reshape
except ValueError as e:
warnings.warn(str(e))
series.append(
TiffPageSeries(pages[s], shape, page0.dtype, axes))
for i, s in enumerate(series):
s.index = i
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def _fluoview_series(self):
"""Return image series in FluoView file."""
page0 = self.pages[0]
dims = {
b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
axes = ''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1)
shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _lsm_series(self):
"""Return image series in LSM file."""
page0 = self.pages[0]
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
if hasattr(lsmi, 'dimension_p') and lsmi.dimension_p > 1:
axes += 'P'
if hasattr(lsmi, 'dimension_m') and lsmi.dimension_m > 1:
axes += 'M'
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
dtype = pages[0].dtype
series = [TiffPageSeries(pages, shape, dtype, axes)]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
dtype = pages[0].dtype
series.append(TiffPageSeries(pages, shape, dtype, axes))
return series
def _imagej_series(self):
"""Return image series in ImageJ file."""
# ImageJ's dimension order is always TZCYXS
# TODO: fix loading of color, composite or palette images
shape = []
axes = []
page0 = self.pages[0]
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not (self.is_rgb and not
ij.get('hyperstack', False)):
shape.append(ij['channels'])
axes.append('C')
remain = ij.get('images', len(self.pages)) // (product(shape)
if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
if page0.axes[0] == 'I':
# contiguous multiple images
shape.extend(page0.shape[1:])
axes.extend(page0.axes[1:])
elif page0.axes[:2] == 'SI':
# color-mapped contiguous multiple images
shape = page0.shape[0:1] + tuple(shape) + page0.shape[2:]
axes = list(page0.axes[0]) + axes + list(page0.axes[2:])
else:
shape.extend(page0.shape)
axes.extend(page0.axes)
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _nih_series(self):
"""Return image series in NIH file."""
page0 = self.pages[0]
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
return [TiffPageSeries(self.pages, shape, page0.dtype, axes)]
def _ome_series(self):
"""Return image series in OME-TIFF file(s)."""
omexml = self.pages[0].tags['image_description'].value
try:
root = etree.fromstring(omexml)
except etree.ParseError as e:
# TODO: test this
warnings.warn("ome-xml: %s" % e)
omexml = omexml.decode('utf-8', 'ignore').encode('utf-8')
root = etree.fromstring(omexml)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
series = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
step = float(along.attrib.get('Step', 1))
start = float(along.attrib['Start'])
stop = float(along.attrib['End']) + step
labels = numpy.arange(start, stop, step)
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * (size // self.pages[0].samples_per_pixel)
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, FileNotFoundError, ValueError):
# TODO: close open file handle
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
series.append(TiffPageSeries(ifds, shape, dtype, axes, self))
for serie in series:
shape = list(serie.shape)
for axis, (newaxis, labels) in modulo.items():
i = serie.axes.index(axis)
size = len(labels)
if shape[i] == size:
serie.axes = serie.axes.replace(axis, newaxis, 1)
else:
shape[i] //= size
shape.insert(i+1, size)
serie.axes = serie.axes.replace(axis, axis+newaxis, 1)
serie.shape = tuple(shape)
# squeeze dimensions
for serie in series:
serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes)
return series
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __str__(self):
"""Return string containing information about file."""
result = [
"TIFF file: %s" % self._fh.name,
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
result.extend((attr for attr in (
'mdgel', 'mediacy', 'stk', 'lsm', 'vista', 'imagej', 'fluoview',
'micromanager', 'nih', 'ome', 'scn', 'tvips', 'fei', 'sem')
if getattr(self, 'is_'+attr)))
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def info(self, series=None, pages=None):
"""Return string with detailed information about file."""
if series is None:
series = self.series
else:
series = [self.series[i] for i in sequence(series)]
result = [str(self)]
for s in series:
result.append(str(s))
if pages is None:
result.append(s.pages[0].info())
if pages is not None:
if pages == 'all':
pages = self.pages
else:
pages = [self.pages[i] for i in sequence(pages)]
for p in pages:
result.append(p.info())
return '\n\n'.join(result)
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
"""File has BigTIFF format."""
return self.offset_size != 4
@lazyattr
def is_rgb(self):
"""File contains only RGB images."""
return self.pages and all(p.is_rgb for p in self.pages)
@lazyattr
def is_indexed(self):
"""File contains only indexed images."""
return self.pages and all(p.is_indexed for p in self.pages)
@lazyattr
def is_mdgel(self):
"""File has MD Gel format."""
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
"""File was created by Media Cybernetics software."""
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
"""File has MetaMorph STK format."""
return self.pages and all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
"""File was created by Carl Zeiss software."""
return len(self.pages) and self.pages[0].is_lsm
@lazyattr
def is_vista(self):
"""File was created by ISS Vista."""
return len(self.pages) and self.pages[0].is_vista
@lazyattr
def is_imagej(self):
"""File has ImageJ format."""
return len(self.pages) and self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
"""File was created by MicroManager."""
return len(self.pages) and self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
"""File has NIH Image format."""
return len(self.pages) and self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
"""File was created by Olympus FluoView."""
return len(self.pages) and self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
"""File has OME-TIFF format."""
return len(self.pages) and self.pages[0].is_ome
@lazyattr
def is_scn(self):
"""File has Leica SCN format."""
return len(self.pages) and self.pages[0].is_scn
@lazyattr
def is_tvips(self):
"""File was created using EM-MENU software."""
return len(self.pages) and self.pages[0].is_tvips
@lazyattr
def is_fei(self):
"""File was created using FEI software."""
return len(self.pages) and self.pages[0].is_fei
@lazyattr
def is_sem(self):
"""File contains Zeiss SEM metadata."""
return len(self.pages) and self.pages[0].is_sem
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, color-mapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
color-mapped and with extra samples if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy.ndarray
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes/images (stk, ij).
1. planar samples_per_pixel.
2. image_depth Z (sgi).
3. image_length Y.
4. image_width X.
5. contig samples_per_pixel.
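
    Examples
    --------
    A minimal usage sketch; 'test.tif' is a placeholder for any existing
    TIFF file:

    >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
    ...     page = tif.pages[0]
    ...     print(page.shape, page.axes, page.dtype)
    ...     image = page.asarray()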
"""
def __init__(self, parent, skip=False):
"""Initialize instance from file.
If skip, seek to next IFD offset without reading tags.
"""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._offset = 0 # offset to this IDF
self._fromfile(skip)
if skip:
return
self._process_tags()
def _fromfile(self, skip=False):
"""Read TIFF IFD structure and its tags from file.
The file cursor is left at the storage position of the offset to the
next IFD (if any).
Raises StopIteration if offset (first bytes read) is 0
or a corrupted page list is encountered.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
pos = fh.tell()
# read offset to this IFD
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
fh.seek(pos)
raise StopIteration()
if offset >= fh.size:
warnings.warn("invalid page offset > file size")
fh.seek(pos)
raise StopIteration()
self._offset = offset
# read standard tags
fh.seek(offset)
fmt, size, tagsize = {4: ('H', 2, 12), 8: ('Q', 8, 20)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
if numtags > 4096:
raise ValueError("suspicious number of tags")
except Exception:
warnings.warn("corrupted page list at offset %i" % offset)
fh.seek(pos)
raise StopIteration()
if skip:
fh.seek(offset + size + numtags * tagsize)
return
tags = self.tags
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple tags with same code
# e.g. MicroManager files contain two image_description tags
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell() # where offset to next IFD is stored
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._fix_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if name in tags:
#tags[name] = TiffTag(code, dtype=dtype, count=count,
# value=default, name=name)
if validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
elif default is not None:
setattr(self, name, validate[default] if validate else default)
if 'bits_per_sample' in tags:
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
if 'sample_format' in tags:
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v]
for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_length' in tags:
if 'rows_per_strip' not in tags:
self.rows_per_strip = self.image_length
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_vista or self.parent.is_vista:
# ISS Vista writes wrong image_depth tag
self.image_depth = 1
if self.is_indexed:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
# TODO: support other photometric modes than RGB
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_indexed:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (planes, image_length, image_width,
self.color_map.shape[0])
else:
self.shape = (planes, image_depth, image_length,
image_width, self.color_map.shape[0])
self.axes = self.axes + 'S'
else:
warnings.warn("palette cannot be applied")
self.is_indexed = False
elif self.is_indexed:
samples = 1
if 'extra_samples' in self.tags:
samples += self.tags['extra_samples'].count
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (image_length, image_width,
self.color_map.shape[0])
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
self.color_map.shape[0])
self.axes = 'ZYXS'
else:
warnings.warn("palette cannot be applied")
self.is_indexed = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8),)
assert len(self.shape) == len(self.axes)
def _patch_imagej(self):
"""Return if ImageJ data are contiguous and adjust page attributes.
Patch 'strip_offsets' and 'strip_byte_counts' tags to span the
complete contiguous data.
ImageJ stores all image metadata in the first page and image data is
stored contiguously before the second page, if any. No need to
read other pages.
"""
if not self.is_imagej or not self.is_contiguous or self.parent.is_ome:
return
images = self.imagej_tags.get('images', 0)
if images <= 1:
return
offset, count = self.is_contiguous
shape = self.shape
if self.is_indexed:
shape = shape[:-1]
fh = self.parent.filehandle
if (count != product(shape) * self.bits_per_sample // 8 or
offset + count*images > fh.size):
self.is_imagej = False
warnings.warn("invalid ImageJ metadata or corrupted file")
return
# check that next page is stored after data
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
pos = fh.tell()
fmt = {4: 'I', 8: 'Q'}[offset_size]
nextpage = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
fh.seek(pos)
if nextpage and offset + count*images > nextpage:
return
# patch metadata
pre = 'tile' if self.is_tiled else 'strip'
self.tags[pre+'_offsets'].value = (offset,)
self.tags[pre+'_byte_counts'].value = (count * images,)
self.shape = (images,) + self.shape
self._shape = (images,) + self._shape[1:]
self.axes = 'I' + self.axes
return True
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True,
maxsize=64*2**30):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
        If any of 'squeeze', 'colormapped', or 'rgbonly' is not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files that store their data
            in a few large contiguous blocks.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
        maxsize : int or None
            Maximum number of samples in the image data before a
            ValueError is raised. Can be used to guard against
            denial-of-service from corrupt files. Default: 64 * 2**30.
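
        Examples
        --------
        Illustrative sketch only; 'test.tif' is a placeholder for an
        existing TIFF file:

        >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
        ...     data = tif.pages[0].asarray(memmap=True)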
"""
if not self._shape:
return
if maxsize and product(self._shape) > maxsize:
raise ValueError("data is too large %s" % str(self._shape))
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
if 'sample_format' in self.tags:
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats do not match %s" % tag.value)
if self.is_chroma_subsampled:
# TODO: implement chroma subsampling
raise NotImplementedError("chroma subsampling not supported")
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
lsb2msb = self.fill_order == 'lsb2msb'
byte_counts, offsets = self._byte_counts_offsets
if self.is_tiled:
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
runlen = image_width
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
if lsb2msb:
reverse_bitorder(result)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x, typecode=typecode):
if self.predictor == 'float':
# the floating point horizontal differencing decoder
# needs the raw byte order
typecode = dtype
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8)) *
(bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpack_rgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpack_ints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
def decompress(x):
return decode_jpeg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = fh.read(bytecount)
if lsb2msb:
tile = reverse_bitorder(tile)
tile = decompress(tile)
tile = unpack(tile)
try:
tile.shape = tile_shape
except ValueError:
# incomplete tiles; see gdal issue #1179
warnings.warn("invalid tile data")
t = numpy.zeros(tile_shape, dtype).reshape(-1)
s = min(tile.size, t.size)
t[:s] = tile[:s]
tile = t.reshape(tile_shape)
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
elif self.predictor == 'float':
raise NotImplementedError()
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = self.rows_per_strip * self.image_width
if self.planar_configuration == 'contig':
strip_size *= self.samples_per_pixel
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
if lsb2msb:
strip = reverse_bitorder(strip)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor and not (self.is_tiled and not self.is_contiguous):
if self.parent.is_lsm and not self.compression:
pass # work around bug in LSM510 software
elif self.predictor == 'horizontal':
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
elif self.predictor == 'float':
result = decode_floats(result)
if colormapped and self.is_indexed:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = apply_colormap(result[:, 0:1, :, :, :, 0:1],
self.color_map)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples,)
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                    result **= 2  # square root data format
result *= scale
if closed:
# TODO: file should remain open if an exception occurred above
fh.close()
return result
@lazyattr
def _byte_counts_offsets(self):
"""Return simplified byte_counts and offsets."""
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
j = 0
for i, (b, o) in enumerate(zip(byte_counts, offsets)):
if b > 0 and o > 0:
if i > j:
byte_counts[j] = b
offsets[j] = o
j += 1
elif b > 0 and o <= 0:
raise ValueError("invalid offset")
else:
warnings.warn("empty byte count")
if j == 0:
j = 1
return byte_counts[:j], offsets[:j]
def _is_memmappable(self, rgbonly, colormapped):
"""Return if page's image data in file can be memory-mapped."""
return (self.parent.filehandle.is_file and
self.is_contiguous and
(self.bits_per_sample == 8 or self.parent._is_native) and
self.fill_order == 'msb2lsb' and
not self.predictor and
not self.is_chroma_subsampled and
not (rgbonly and 'extra_samples' in self.tags) and
not (colormapped and self.is_indexed))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction, fill_order, and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1] or
byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
'x'.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_scn', 'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def info(self):
"""Return string with detailed information about page."""
result = ['\n'.join((str(self), str(self.tags)))]
if self.is_indexed:
result.append('Color Map: %s, %s' % (self.color_map.shape,
self.color_map.dtype))
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header', 'tvips_metadata', 'sfeg_metadata',
'helios_metadata', 'sem_metadata'):
if hasattr(self, attr):
result.append('\n'.join((
attr.upper(), str(Record(getattr(self, attr))))))
if self.is_micromanager:
result.append('MICROMANAGER_FILE_METADATA\n%s' %
Record(self.micromanager_metadata))
return '\n\n'.join(result)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
result = imagej_description_dict(self.is_imagej)
if 'imagej_metadata' in self.tags:
try:
result.update(imagej_metadata(
self.tags['imagej_metadata'].value,
self.tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""Page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""Page contains contiguous image."""
if 'planar_configuration' in self.tags:
return self.tags['planar_configuration'].value == 1
return True
@lazyattr
def is_indexed(self):
"""Page contains indexed, palette-colored image.
Disable color-mapping for OME, LSM, STK, and ImageJ hyperstacks.
"""
if (self.is_stk or self.is_lsm or self.parent.is_lsm or
self.is_ome or self.parent.is_ome):
return False
if self.is_imagej:
if b'mode' in self.is_imagej:
return False
elif self.parent.is_imagej:
return self.parent.is_indexed
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""Page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""Page is reduced image of another image."""
return ('new_subfile_type' in self.tags and
self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_chroma_subsampled(self):
"""Page contains chroma subsampled image."""
return ('ycbcr_subsampling' in self.tags and
self.tags['ycbcr_subsampling'].value != (1, 1))
@lazyattr
def is_mdgel(self):
"""Page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""Page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""Page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""Page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""Page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""Page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""Page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_vista(self):
"""Software tag is 'ISS Vista'."""
return ('software' in self.tags and
self.tags['software'].value == b'ISS Vista')
@lazyattr
def is_ome(self):
"""Page contains OME-XML in image_description tag."""
if 'image_description' not in self.tags:
return False
d = self.tags['image_description'].value.strip()
return d.startswith(b'<?xml version=') and d.endswith(b'</OME>')
@lazyattr
def is_scn(self):
"""Page contains Leica SCN XML in image_description tag."""
if 'image_description' not in self.tags:
return False
d = self.tags['image_description'].value.strip()
return d.startswith(b'<?xml version=') and d.endswith(b'</scn>')
@lazyattr
def is_shaped(self):
"""Return description containing shape if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if b'"shape":' in description or b'shape=(' in description:
return description
if 'image_description_1' in self.tags:
description = self.tags['image_description_1'].value
if b'"shape":' in description or b'shape=(' in description:
return description
@lazyattr
def is_imagej(self):
"""Return ImageJ description if exists, else None."""
if 'image_description' in self.tags:
description = self.tags['image_description'].value
if description.startswith(b'ImageJ='):
return description
if 'image_description_1' in self.tags:
# Micromanager
description = self.tags['image_description_1'].value
if description.startswith(b'ImageJ='):
return description
@lazyattr
def is_micromanager(self):
"""Page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
@lazyattr
def is_tvips(self):
"""Page contains TVIPS metadata."""
return 'tvips_metadata' in self.tags
@lazyattr
def is_fei(self):
"""Page contains SFEG or HELIOS metadata."""
return 'sfeg_metadata' in self.tags or 'helios_metadata' in self.tags
@lazyattr
def is_sem(self):
"""Page contains Zeiss SEM metadata."""
return 'sem_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
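
    Examples
    --------
    Sketch of inspecting a tag of the first page; 'test.tif' is assumed
    to exist:

    >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
    ...     tag = tif.pages[0].tags['image_width']
    ...     print(tag.name, tag.code, tag.count, tag.value)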
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name, _, _, cout_, _ = TIFF_TAGS[code]
if cout_ and cout_ != count:
count = cout_
warnings.warn("incorrect count for tag '%s'" % name)
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (
273, 279, 324, 325, 530, 531):
# scalar value if not strip/tile offsets/byte_counts or subsampling
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes) and
self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _fix_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this case; need example file
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffPageSeries(object):
"""Series of TIFF pages with compatible shape and data type.
Attributes
----------
pages : list of TiffPage
Sequence of TiffPages in series.
dtype : numpy.dtype or str
Data type of the image array in series.
shape : tuple
Dimensions of the image array in series.
axes : str
Labels of axes in shape. See TiffPage.axes.
offset : int or None
Position of image data in file if memory-mappable, else None.
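
    Examples
    --------
    Sketch of reading a complete series; 'test.tif' is assumed to exist:

    >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
    ...     series = tif.series[0]
    ...     print(series.shape, series.axes)
    ...     data = series.asarray()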
"""
#__slots__ = 'pages', 'shape', 'dtype', 'axes', 'parent'
def __init__(self, pages, shape, dtype, axes, parent=None):
# TODO? sort pages by page number?
self.index = 0
self.pages = pages
self.shape = tuple(shape)
self.axes = ''.join(axes)
self.dtype = numpy.dtype(dtype)
if parent:
self.parent = parent
elif len(pages):
self.parent = pages[0].parent
else:
self.parent = None
def asarray(self, memmap=False):
"""Return image data from series of TIFF pages as numpy array.
Parameters
----------
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
"""
if self.parent:
return self.parent.asarray(series=self, memmap=memmap)
@lazyattr
def offset(self):
"""Return offset to memory-mappable data in page series."""
if len(self.pages) == 0:
return
rgbonly = False
colormapped = self.pages[0].is_indexed
pos = 0
for page in self.pages:
if page is None:
return
if not page._is_memmappable(rgbonly, colormapped):
return
if not pos:
pos = page.is_contiguous[0] + page.is_contiguous[1]
continue
if pos != page.is_contiguous[0]:
return
pos += page.is_contiguous[1]
offset = self.pages[0].is_contiguous[0]
if (pos != offset + product(self.shape) * self.dtype.itemsize and
not self.pages[0].is_imagej):
return
return offset
def __len__(self):
"""Return number of TiffPages in series."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified TiffPage."""
return self.pages[key]
def __iter__(self):
"""Return iterator over TiffPages in series."""
return iter(self.pages)
def __str__(self):
"""Return string with information about series."""
s = ', '.join(s for s in (
'x'.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
self.axes,
'%i pages' % len(self.pages),
('memmap-offset=%i' % self.offset) if self.offset else
'not mem-mappable'))
return 'Series %i: %s' % (self.index, s)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
"""
    """
    _patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
Binary streams are not supported.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
if not isinstance(files[0], basestring):
raise ValueError("not a file name")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, tempdir=None, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes do not match.
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile(dir=tempdir) as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern does not match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern does not match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern does not match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes do not match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
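
    Examples
    --------
    A minimal illustration of attribute-style access:

    >>> r = Record({'name': 'value'})
    >>> r.name
    'value'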
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
# numpy records
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
elif isinstance(v, Record):
s.append(("* %s:\n%s" % (k, str(v).replace('*', ' *'))))
continue
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
A limited, special purpose file handler that can:
* handle embedded files (for CZI within CZI files)
* re-open closed files (for multi file formats, such as OME-TIFF)
    * read and write numpy arrays and records from file-like objects
    Only 'rb' and 'wb' modes are supported. Concurrent reading and writing
    of the same stream is untested.
When initialized from another file handle, do not use it unless this
FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, the file has a fileno and can be memory-mapped.
All attributes are read-only.
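
    Examples
    --------
    Sketch of reading from an assumed binary file 'data.bin':

    >>> with FileHandle('data.bin') as fh:  # doctest: +SKIP
    ...     header = fh.read(8)
    ...     values = fh.read_array('<u2', count=4)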
"""
__slots__ = ('_fh', '_file', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, file, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
file : str, binary stream, or FileHandle
            File name or seekable binary stream, such as an open file
or BytesIO.
mode : str
File open mode in case 'file' is a file name. Must be 'rb' or 'wb'.
name : str
Optional name of file in case 'file' is a binary stream.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._file = file
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._file, basestring):
# file name
self._file = os.path.realpath(self._file)
self._dir, self._name = os.path.split(self._file)
self._fh = open(self._file, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._file, FileHandle):
# FileHandle
self._fh = self._file._fh
if self._offset is None:
self._offset = 0
self._offset += self._file._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._file._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._file._name
if self._mode and self._mode != self._file._mode:
raise ValueError('FileHandle has wrong mode')
self._mode = self._file._mode
self._dir = self._file._dir
elif hasattr(self._file, 'seek'):
# binary stream: open file, BytesIO
try:
self._file.tell()
except Exception:
raise ValueError("binary stream is not seekable")
self._fh = self._file
if self._offset is None:
self._offset = self._file.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed binary stream"
try:
self._mode = self._fh.mode
except AttributeError:
pass
else:
raise ValueError("The first parameter must be a file name, "
"seekable binary stream, or FileHandle")
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def write(self, bytestring):
"""Write bytestring to file."""
return self._fh.write(bytestring)
def flush(self):
"""Flush write buffers if applicable."""
return self._fh.flush()
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory-map file without fileno")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def write_array(self, data):
"""Write numpy array to binary file."""
try:
data.tofile(self._fh)
except Exception:
# BytesIO
self._fh.write(data.tostring())
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2 and self._size > 0:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON '%s'" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.ndarray."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for _ in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for _ in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("invalid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
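    # skip the data type and size fields of the recording entry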
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
def read_tvips_header(fh, byteorder, dtype, count):
"""Read TVIPS EM-MENU headers and return as Record."""
header = Record(fh.read_record(TVIPS_HEADER_V1, byteorder=byteorder))
if header.version == 2:
header = Record(fh.read_record(TVIPS_HEADER_V2, byteorder=byteorder))
if header.magic != int(0xaaaaaaaa):
raise ValueError("invalid TVIPS v2 magic number")
# decode utf16 strings
for name, typestr in TVIPS_HEADER_V2:
if typestr.startswith('V'):
s = header[name].tostring().decode('utf16', errors='ignore')
header[name] = stripnull(s, null='\0')
# convert nm to m
for axis in 'xy':
header['physical_pixel_size_' + axis] /= 1e9
header['pixel_size_' + axis] /= 1e9
elif header.version != 1:
raise ValueError("unknown TVIPS header version")
return header
def read_fei_metadata(fh, byteorder, dtype, count):
"""Read FEI SFEG/HELIOS headers and return as nested Record."""
result = Record()
section = Record()
for line in fh.read(count).splitlines():
line = line.strip()
if line.startswith(b'['):
section = Record()
result[bytes2str(line[1:-1])] = section
continue
try:
key, value = line.split(b'=')
except ValueError:
continue
section[bytes2str(key)] = astype(value)
return result
def read_sem_metadata(fh, byteorder, dtype, count):
"""Read Zeiss SEM tag and return as Record."""
result = Record({'': ()})
key = None
for line in fh.read(count).splitlines():
line = line.decode('cp1252')
if line.isupper():
key = line.lower()
elif key:
try:
name, value = line.split('=')
except ValueError:
continue
value = value.strip()
unit = ''
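            # try to split the value into a numeric part and a unit string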
try:
v, u = value.split()
number = astype(v, (int, float))
if number != v:
value = number
unit = u
            except ValueError:
number = astype(value, (int, float))
if number != value:
value = number
if value in ('No', 'Off'):
value = False
elif value in ('Yes', 'On'):
value = True
result[key] = (name.strip(), value)
if unit:
result[key] += (unit,)
key = None
else:
result[''] += (astype(line, (int, float)),)
return result
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
raise ValueError("not a MicroManager TIFF file")
result = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
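    # header/offset pairs locate the index map, display settings, and comments
    # blocks; the summary JSON directly follows this header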
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
result['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
result['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
result['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
result['comments'] = read_json(fh, byteorder, None, count)
return result
def imagej_metadata(data, bytecounts, byteorder):
"""Return dictionary from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
    if data[:4] not in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
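    # the header holds (type, count) pairs; the values follow, each with its
    # byte length given by the remaining entries of bytecounts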
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description_dict(description):
"""Return dictionary from ImageJ image description byte string.
Raise ValueError if not a valid ImageJ description.
>>> description = b'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n'
>>> imagej_description_dict(description) # doctest: +SKIP
{'ImageJ': '1.11a', 'images': 510, 'hyperstack': True}
"""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
if 'ImageJ' not in result:
raise ValueError("not a ImageJ image description")
return result
def imagej_description(shape, rgb=None, colormaped=False, version='1.11a',
hyperstack=None, mode=None, loop=None, **kwargs):
"""Return ImageJ image decription from data shape as byte string.
ImageJ can handle up to 6 dimensions in order TZCYXS.
>>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP
ImageJ=1.11a
images=510
channels=2
slices=5
frames=51
hyperstack=true
mode=grayscale
loop=false
"""
if colormaped:
raise NotImplementedError("ImageJ colormapping not supported")
shape = imagej_shape(shape, rgb=rgb)
rgb = shape[-1] in (3, 4)
result = ['ImageJ=%s' % version]
append = []
result.append('images=%i' % product(shape[:-3]))
if hyperstack is None:
#if product(shape[:-3]) > 1:
hyperstack = True
append.append('hyperstack=true')
else:
append.append('hyperstack=%s' % bool(hyperstack))
if shape[2] > 1:
result.append('channels=%i' % shape[2])
if mode is None and not rgb:
mode = 'grayscale'
if hyperstack and mode:
append.append('mode=%s' % mode)
if shape[1] > 1:
result.append('slices=%i' % shape[1])
if shape[0] > 1:
result.append("frames=%i" % shape[0])
    if loop is None:
        append.append('loop=false')
    else:
        append.append('loop=%s' % bool(loop))
for key, value in kwargs.items():
append.append('%s=%s' % (key.lower(), value))
return str2bytes('\n'.join(result + append + ['']))
def imagej_shape(shape, rgb=None):
"""Return shape normalized to 6D ImageJ hyperstack TZCYXS.
Raise ValueError if not a valid ImageJ hyperstack shape.
>>> imagej_shape((2, 3, 4, 5, 3), False)
(2, 3, 4, 5, 3, 1)
"""
shape = tuple(int(i) for i in shape)
ndim = len(shape)
    if ndim < 2 or ndim > 6:
raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional")
if rgb is None:
rgb = shape[-1] in (3, 4) and ndim > 2
if rgb and shape[-1] not in (3, 4):
raise ValueError("invalid ImageJ hyperstack: not a RGB image")
if not rgb and ndim == 6 and shape[-1] != 1:
raise ValueError("invalid ImageJ hyperstack: not a non-RGB image")
if rgb or shape[-1] == 1:
return (1, ) * (6 - ndim) + shape
else:
return (1, ) * (5 - ndim) + shape + (1,)
def image_description_dict(description):
"""Return dictionary from image description byte string.
    Raise ValueError if description is of unknown format.
>>> image_description_dict(b'shape=(256, 256, 3)')
{'shape': (256, 256, 3)}
>>> description = b'{"shape": [256, 256, 3], "axes": "YXS"}'
>>> image_description_dict(description) # doctest: +SKIP
{'shape': [256, 256, 3], 'axes': 'YXS'}
"""
if description.startswith(b'shape='):
# old style 'shaped' description
shape = tuple(int(i) for i in description[7:-1].split(b','))
return dict(shape=shape)
if description.startswith(b'{') and description.endswith(b'}'):
# JSON description
return json.loads(description.decode('utf-8'))
raise ValueError("unknown image description")
def image_description(shape, colormaped=False, **metadata):
"""Return image description from data shape and meta data.
Return UTF-8 encoded JSON.
>>> image_description((256, 256, 3), axes='YXS') # doctest: +SKIP
b'{"shape": [256, 256, 3], "axes": "YXS"}'
"""
if colormaped:
shape = shape + (3,)
metadata.update({'shape': shape})
return json.dumps(metadata).encode('utf-8')
def _replace_by(module_function, package=__package__, warn=False):
"""Try replace decorated function by module.function."""
try:
from importlib import import_module
except ImportError:
warnings.warn('could not import module importlib')
return lambda func: func
def decorate(func, module_function=module_function, warn=warn):
try:
module, function = module_function.split('.')
if package:
module = import_module('.' + module, package=package)
else:
module = import_module(module)
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decode_floats(data):
"""Decode floating point horizontal differencing.
The TIFF predictor type 3 reorders the bytes of the image values and
applies horizontal byte differencing to improve compression of floating
point images. The ordering of interleaved color channels is preserved.
Parameters
----------
data : numpy.ndarray
The image to be decoded. The dtype must be a floating point.
The shape must include the number of contiguous samples per pixel
even if 1.
"""
shape = data.shape
dtype = data.dtype
if len(shape) < 3:
raise ValueError('invalid data shape')
if dtype.char not in 'dfe':
raise ValueError('not a floating point image')
littleendian = data.dtype.byteorder == '<' or (
sys.byteorder == 'little' and data.dtype.byteorder == '=')
# undo horizontal byte differencing
data = data.view('uint8')
data.shape = shape[:-2] + (-1,) + shape[-1:]
numpy.cumsum(data, axis=-2, dtype='uint8', out=data)
# reorder bytes
if littleendian:
data.shape = shape[:-2] + (-1,) + shape[-2:]
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
data = data[..., ::-1]
# back to float
data = numpy.ascontiguousarray(data)
data = data.view(dtype)
data.shape = shape
return data
def decode_jpeg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
from czifile import _czifile
image = _czifile.decode_jpeg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decode_packbits')
def decode_packbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
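# Example (illustrative): b'\xfe\xaa' encodes a run of three 0xaa bytes, so
# decode_packbits(b'\xfe\xaa') returns b'\xaa\xaa\xaa'.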
@_replace_by('_tifffile.decode_lzw')
def decode_lzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of 'bitw' bits at 'bitcount' position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
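    # decoding starts with 9-bit codes; the code width grows as the string
    # table fills up (at table sizes 511, 1023, and 2047)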
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpack_ints')
def unpack_ints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l,), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
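# Example (illustrative): unpack_ints(b'\x12', 'B', 4, runlen=2) returns
# array([1, 2], dtype=uint8), i.e. two 4-bit samples packed into one byte.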
def unpack_rgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpack_rgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpack_rgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpack_rgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
@_replace_by('_tifffile.reverse_bitorder')
def reverse_bitorder(data):
"""Reverse bits in each byte of byte string or numpy array.
Decode data where pixels with lower column values are stored in the
lower-order bits of the bytes (fill_order == 'lsb2msb').
Parameters
----------
data : byte string or ndarray
The data to be bit reversed. If byte string, a new bit-reversed byte
string is returned. Numpy arrays are bit-reversed in-place.
"""
table = (
b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8(\xa8h'
b'\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14\x94T\xd44'
b'\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|\xfc\x02\x82B'
b'\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*\xaaj\xea\x1a'
b'\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16\x96V\xd66\xb6v\xf6'
b'\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~\xfe\x01\x81A\xc1!\xa1a'
b'\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)\xa9i\xe9\x19\x99Y\xd99'
b'\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15\x95U\xd55\xb5u\xf5\r\x8dM'
b'\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}\xfd\x03\x83C\xc3#\xa3c\xe3\x13'
b'\x93S\xd33\xb3s\xf3\x0b\x8bK\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb'
b'\x07\x87G\xc7\'\xa7g\xe7\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo'
b'\xef\x1f\x9f_\xdf?\xbf\x7f\xff')
try:
view = data.view('uint8')
numpy.take(numpy.fromstring(table, dtype='uint8'), view, out=view)
except AttributeError:
return data.translate(table)
except ValueError:
raise NotImplementedError("slices of arrays not supported")
def apply_colormap(image, colormap, contig=True):
"""Return palette-colored image.
The image values are used to index the colormap on axis 1. The returned
image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype.
Parameters
----------
image : numpy.ndarray
Indexes into the colormap.
colormap : numpy.ndarray
RGB lookup table aka palette of shape (3, 2**bits_per_sample).
contig : bool
If True, return a contiguous array.
Examples
--------
>>> image = numpy.arange(256, dtype='uint8')
>>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256
>>> apply_colormap(image, colormap)[-1]
array([65280, 65280, 65280], dtype=uint16)
"""
image = numpy.take(colormap, image, axis=1)
image = numpy.rollaxis(image, 0, image.ndim)
if contig:
image = numpy.ascontiguousarray(image)
return image
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy.ndarray
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
def reshape_nd(image, ndim):
"""Return image array with at least ndim dimensions.
Prepend 1s to image shape as necessary.
>>> reshape_nd(numpy.empty(0), 1).shape
(0,)
>>> reshape_nd(numpy.empty(1), 2).shape
(1, 1)
>>> reshape_nd(numpy.empty((2, 3)), 3).shape
(1, 2, 3)
>>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape
(3, 4, 5)
"""
if image.ndim >= ndim:
return image
image = image.reshape((1,) * (ndim - image.ndim) + image.shape)
return image
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape do not match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return tuple(shape), ''.join(axes)
def transpose_axes(image, axes, asaxes='CTZYX'):
"""Return image with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to image
shape = image.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
image = image.reshape(shape)
# transpose axes
image = image.transpose([axes.index(ax) for ax in asaxes])
return image
def reshape_axes(axes, shape, newshape):
"""Return axes matching new shape.
Unknown dimensions are labelled 'Q'.
>>> reshape_axes('YXS', (219, 301, 1), (219, 301))
'YX'
>>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1))
'QQYQXQ'
"""
shape = tuple(shape)
newshape = tuple(newshape)
if len(axes) != len(shape):
raise ValueError("axes do not match shape")
if product(shape) != product(newshape):
raise ValueError("can not reshape %s to %s" % (shape, newshape))
if not axes or not newshape:
return ''
lendiff = max(0, len(shape) - len(newshape))
if lendiff:
newshape = newshape + (1,) * lendiff
i = len(shape)-1
prodns = 1
prods = 1
result = []
for ns in newshape[::-1]:
prodns *= ns
while i > 0 and shape[i] == 1 and ns != 1:
i -= 1
if ns == shape[i] and prodns == prods*shape[i]:
prods *= shape[i]
result.append(axes[i])
i -= 1
else:
result.append('Q')
return ''.join(reversed(result[lendiff:]))
def stack_pages(pages, memmap=False, tempdir=None, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
data0 = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + data0.shape
if memmap:
with tempfile.NamedTemporaryFile(dir=tempdir) as fh:
data = numpy.memmap(fh, dtype=data0.dtype, shape=shape)
else:
data = numpy.empty(shape, dtype=data0.dtype)
data[0] = data0
if memmap:
data.flush()
del data0
for i, page in enumerate(pages[1:]):
data[i+1] = page.asarray(*args, **kwargs)
if memmap:
data.flush()
return data
def stripnull(string, null=b'\x00'):
"""Return string truncated at first null character.
Clean NULL terminated C strings. For unicode strings use null='\\0'.
"""
i = string.find(null)
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def astype(value, types=None):
"""Return argument as one of types if possible."""
if types is None:
types = int, float, bytes2str
for typ in types:
try:
return typ(value)
except (ValueError, TypeError, UnicodeEncodeError):
pass
return value
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
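# Example (illustrative): format_size(1234567) returns '1206 KiB'.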
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value,)
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def parse_kwargs(kwargs, *keys, **keyvalues):
"""Return dict with keys from keys|keyvals and values from kwargs|keyvals.
Existing keys are deleted from kwargs.
>>> kwargs = {'one': 1, 'two': 2, 'four': 4}
>>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5)
>>> kwargs == {'one': 1}
True
>>> kwargs2 == {'two': 2, 'four': 4, 'five': 5}
True
"""
result = {}
for key in keys:
if key in kwargs:
result[key] = kwargs[key]
del kwargs[key]
for key, value in keyvalues.items():
if key in kwargs:
result[key] = kwargs[key]
del kwargs[key]
else:
result[key] = value
return result
def update_kwargs(kwargs, **keyvalues):
"""Update dict with keys and values if keys do not already exist.
>>> kwargs = {'one': 1, }
>>> update_kwargs(kwargs, one=None, two=2)
>>> kwargs == {'one': 1, 'two': 2}
True
"""
for key, value in keyvalues.items():
if key not in kwargs:
kwargs[key] = value
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
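# Example (illustrative): TIFF_SUBFILE_TYPES()[5] returns
# ('reduced_image', 'mask')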
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
34925: 'lzma',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decode_packbits,
'lzw': decode_lzw,
# 'jpeg': decode_jpeg
}
if lzma:
TIFF_DECOMPESSORS['lzma'] = lzma.decompress
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
'P': 'dimension_p',
'M': 'dimension_m',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# TVIPS metadata from EMMENU Help file
TVIPS_HEADER_V1 = [
('version', 'i4'),
('comment_v1', 'a80'),
('high_tension', 'i4'),
('spherical_aberration', 'i4'),
('illumination_aperture', 'i4'),
('magnification', 'i4'),
('post-magnification', 'i4'),
('focal_length', 'i4'),
('defocus', 'i4'),
('astigmatism', 'i4'),
('astigmatism_direction', 'i4'),
('biprism_voltage', 'i4'),
('specimen_tilt_angle', 'i4'),
('specimen_tilt_direction', 'i4'),
('illumination_tilt_direction', 'i4'),
('illumination_tilt_angle', 'i4'),
('image_mode', 'i4'),
('energy_spread', 'i4'),
('chromatic_aberration', 'i4'),
('shutter_type', 'i4'),
('defocus_spread', 'i4'),
('ccd_number', 'i4'),
('ccd_size', 'i4'),
('offset_x_v1', 'i4'),
('offset_y_v1', 'i4'),
('physical_pixel_size', 'i4'),
('binning', 'i4'),
('readout_speed', 'i4'),
('gain_v1', 'i4'),
('sensitivity_v1', 'i4'),
('exposure_time_v1', 'i4'),
('flat_corrected', 'i4'),
('dead_px_corrected', 'i4'),
('image_mean', 'i4'),
('image_std', 'i4'),
('displacement_x', 'i4'),
('displacement_y', 'i4'),
('date_v1', 'i4'),
('time_v1', 'i4'),
('image_min', 'i4'),
('image_max', 'i4'),
('image_statistics_quality', 'i4'),
]
TVIPS_HEADER_V2 = [
('image_name', 'V160'), # utf16
('image_folder', 'V160'),
('image_size_x', 'i4'),
('image_size_y', 'i4'),
('image_size_z', 'i4'),
('image_size_e', 'i4'),
('image_data_type', 'i4'),
('date', 'i4'),
('time', 'i4'),
('comment', 'V1024'),
('image_history', 'V1024'),
('scaling', '16f4'),
('image_statistics', '16c16'),
('image_type', 'i4'),
('image_display_type', 'i4'),
('pixel_size_x', 'f4'), # distance between two px in x, [nm]
('pixel_size_y', 'f4'), # distance between two px in y, [nm]
('image_distance_z', 'f4'),
('image_distance_e', 'f4'),
('image_misc', '32f4'),
('tem_type', 'V160'),
('tem_high_tension', 'f4'),
('tem_aberrations', '32f4'),
('tem_energy', '32f4'),
('tem_mode', 'i4'),
('tem_magnification', 'f4'),
('tem_magnification_correction', 'f4'),
('post_magnification', 'f4'),
('tem_stage_type', 'i4'),
('tem_stage_position', '5f4'), # x, y, z, a, b
('tem_image_shift', '2f4'),
('tem_beam_shift', '2f4'),
('tem_beam_tilt', '2f4'),
('tiling_parameters', '7f4'), # 0: tiling? 1:x 2:y 3: max x 4: max y
# 5: overlap x 6: overlap y
('tem_illumination', '3f4'), # 0: spotsize 1: intensity
('tem_shutter', 'i4'),
('tem_misc', '32f4'),
('camera_type', 'V160'),
('physical_pixel_size_x', 'f4'),
('physical_pixel_size_y', 'f4'),
('offset_x', 'i4'),
('offset_y', 'i4'),
('binning_x', 'i4'),
('binning_y', 'i4'),
('exposure_time', 'f4'),
('gain', 'f4'),
('readout_rate', 'f4'),
('flatfield_description', 'V160'),
('sensitivity', 'f4'),
('dose', 'f4'),
('cam_misc', '32f4'),
('fei_microscope_information', 'V1024'),
('fei_specimen_information', 'V1024'),
('magic', 'u4'),
]
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, None, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: None, 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal', 3: 'float'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
330: ('sub_ifds', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, None, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
346: ('indexed', 0, 3, 1, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', (1, 1), 3, 2, None),
531: ('ycbcr_positioning', (1, 1), 3, 1, None),
532: ('reference_black_white', None, 5, 1, None),
32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
32996: ('sgi_datatype', None, None, None, None), # use sample_format
32997: ('image_depth', 1, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
51023: ('fibics_xml', None, 2, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34118: ('sem_metadata', read_sem_metadata), # Zeiss SEM
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
34680: ('sfeg_metadata', read_fei_metadata), # S-FEG
34682: ('helios_metadata', read_fei_metadata), # Helios NanoLab
37706: ('tvips_metadata', read_tvips_header), # TVIPS EMMENU
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation=None,
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
    Requires pyplot to be imported first: `from matplotlib import pyplot`.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image width and length.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can not handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = data.squeeze()
if photometric in ('miniswhite', 'minisblack'):
data = reshape_nd(data, 2)
else:
data = reshape_nd(data, 3)
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and
data.shape[-1] < data.shape[-3] // 8 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if isrgb:
data = data[..., :maxdim, :maxdim, :maxdim]
else:
data = data[..., :maxdim, :maxdim]
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
# TODO: handle complex types
raise NotImplementedError("complex type")
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind == 'i':
dtmin = numpy.iinfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
if data.dtype.kind == 'f':
dtmin = numpy.finfo(data.dtype).min
vmin = numpy.min(data)
if vmin == dtmin:
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0,) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0,) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
                on_changed(int(key), axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
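# Minimal usage sketch (not part of the original module): read an array from a TIFF
# file and display it with imshow; the file name 'example.tif' is a placeholder.
def _example_imshow():
    from matplotlib import pyplot  # imshow looks up matplotlib.pyplot via sys.modules
    tif = TiffFile('example.tif')
    img = tif.asarray()
    page = tif[0]
    tif.close()
    imshow(img, title='example.tif', photometric=page.photometric,
           bitspersample=page.bits_per_sample)
    pyplot.show()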
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def askopenfilename(**kwargs):
"""Return file name(s) from Tkinter's file open dialog."""
try:
from Tkinter import Tk
import tkFileDialog as filedialog
except ImportError:
from tkinter import Tk, filedialog
root = Tk()
root.withdraw()
root.update()
filenames = filedialog.askopenfilename(**kwargs)
root.destroy()
return filenames
def main(argv=None):
"""Command line usage main function."""
    if sys.version_info < (2, 7):
        print("This script requires Python version 2.7 or newer.")
        print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="do not read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="do not display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--vmin', dest='vmin', type='int', default=None,
help="set minimum value for colormapping")
opt('--vmax', dest='vmax', type='int', default=None,
help="set maximum value for colormapping")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
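    # Illustrative invocations (the script name below is a placeholder, not taken from
    # this file):
    #   python tifffile_script.py image.tif            # print structure and display images
    #   python tifffile_script.py --noplot image.tif   # print file structure only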
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
path = askopenfilename(
title="Select a TIFF file",
filetypes=[("TIF files", "*.tif"), ("LSM files", "*.lsm"),
("STK files", "*.stk"), ("allfiles", "*")])
if not path:
parser.error("No file specified")
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = []
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print()
print(tif.info())
print()
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = settings.vmin, settings.vmax
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = settings.vmin, settings.vmax
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
def bytes2str(b):
return str(b, 'cp1252')
def str2bytes(s, encoding="latin-1"):
return s.encode(encoding)
else:
bytes2str = str
def str2bytes(s):
return s
class FileNotFoundError(IOError):
pass
if __name__ == "__main__":
sys.exit(main())
| 36.224216
| 79
| 0.556868
|
7c82701f6f499b9d0cdcd12caf514abd70e7e29e
| 1,039
|
py
|
Python
|
bulk-update-github-auth-token.py
|
gopaljigaur/github-gitea-mirror
|
de3b97ef9fd36fd352f94ceee7e79de02cc6ebef
|
[
"MIT"
] | 79
|
2020-10-14T03:05:45.000Z
|
2022-03-27T01:07:52.000Z
|
bulk-update-github-auth-token.py
|
Ta180m/github-gitea-mirror
|
0238264eda182f8ad946a28a95405297ef12e6e7
|
[
"MIT"
] | 2
|
2021-02-20T01:26:07.000Z
|
2021-03-16T00:52:06.000Z
|
bulk-update-github-auth-token.py
|
Ta180m/github-gitea-mirror
|
0238264eda182f8ad946a28a95405297ef12e6e7
|
[
"MIT"
] | 12
|
2020-10-18T01:31:47.000Z
|
2021-11-09T23:44:57.000Z
|
#!/usr/bin/env python
#################################################################
### This file can be used to bulk update ###
### all repositories with new auth token ###
### Because Gitea currently does not provide an option for it. ###
### Usage : python3 bulk-update-github-auth-token.py ###
#################################################################
import glob
from pathlib import Path
BASEPATH = "/var/lib/gitea/gitea-repositories/"
SEARCH_PATH = "*/*/config"
SEARCH_TOKEN = "OLD_GITHUB_AUTH_TOKEN"
REPLACE_TOKEN = "NEW_GITHUB_AUTH_TOKEN"
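# Illustrative note (the URL layout is an assumption about how these mirrors store
# credentials): each repository's git config typically embeds the token in the remote
# URL, e.g.
#   url = https://OLD_GITHUB_AUTH_TOKEN@github.com/<owner>/<repo>.git
# so replacing "OLD_GITHUB_AUTH_TOKEN@" with "NEW_GITHUB_AUTH_TOKEN@" swaps only the
# credential and leaves the rest of the URL untouched.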
for path in glob.glob("{0}{1}".format(BASEPATH,SEARCH_PATH), recursive=True):
LOG_HEAD = path.replace(BASEPATH,"")
print("Updating : {0}".format(LOG_HEAD.replace(".git/config","")))
    with open(path, 'r') as file:
filedata = file.read()
filedata = filedata.replace("{0}@".format(SEARCH_TOKEN),"{0}@".format(REPLACE_TOKEN))
with open(path, 'w') as file:
file.write(filedata)
print(" ")
| 35.827586
| 89
| 0.556304
|
869c8ee615e45e1731db86633730a86f47c1b7c8
| 22,399
|
py
|
Python
|
saleor/giftcard/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T00:21:11.000Z
|
2022-03-25T00:21:11.000Z
|
saleor/giftcard/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 81
|
2021-10-11T04:26:07.000Z
|
2022-03-28T04:46:43.000Z
|
saleor/giftcard/tests/test_utils.py
|
victor-abz/saleor
|
f8e2b49703d995d4304d5a690dbe9c83631419d0
|
[
"CC-BY-4.0"
] | 1
|
2022-02-16T22:00:59.000Z
|
2022-02-16T22:00:59.000Z
|
from datetime import date, timedelta
from unittest.mock import patch
import pytest
from dateutil.relativedelta import relativedelta
from django.core.exceptions import ValidationError
from django.utils import timezone
from ...core import TimePeriodType
from ...core.utils.promo_code import InvalidPromoCode
from ...order.models import OrderLine
from ...plugins.manager import get_plugins_manager
from ...site import GiftCardSettingsExpiryType
from ...tests.utils import flush_post_commit_hooks
from .. import GiftCardEvents, GiftCardLineData, events
from ..models import GiftCard, GiftCardEvent
from ..utils import (
add_gift_card_code_to_checkout,
assign_user_gift_cards,
calculate_expiry_date,
deactivate_order_gift_cards,
fulfill_gift_card_lines,
fulfill_non_shippable_gift_cards,
get_gift_card_lines,
get_non_shippable_gift_card_lines,
gift_cards_create,
is_gift_card_expired,
order_has_gift_card_lines,
remove_gift_card_code_from_checkout,
)
def test_add_gift_card_code_to_checkout(checkout, gift_card):
# given
assert checkout.gift_cards.count() == 0
# when
add_gift_card_code_to_checkout(
checkout, "test@example.com", gift_card.code, gift_card.currency
)
# then
assert checkout.gift_cards.count() == 1
def test_add_gift_card_code_to_checkout_inactive_card(checkout, gift_card):
# given
gift_card.is_active = False
gift_card.save(update_fields=["is_active"])
assert checkout.gift_cards.count() == 0
# when
# then
with pytest.raises(InvalidPromoCode):
add_gift_card_code_to_checkout(
checkout, "test@example.com", gift_card.code, gift_card.currency
)
def test_add_gift_card_code_to_checkout_expired_card(checkout, gift_card):
# given
gift_card.expiry_date = date.today() - timedelta(days=10)
gift_card.save(update_fields=["expiry_date"])
assert checkout.gift_cards.count() == 0
# when
# then
with pytest.raises(InvalidPromoCode):
add_gift_card_code_to_checkout(
checkout, "test@example.com", gift_card.code, gift_card.currency
)
def test_add_gift_card_code_to_checkout_invalid_currency(checkout, gift_card):
# given
currency = "EUR"
assert gift_card.currency != currency
assert checkout.gift_cards.count() == 0
# when
# then
with pytest.raises(InvalidPromoCode):
add_gift_card_code_to_checkout(
checkout, "test@example.com", gift_card.code, currency
)
def test_add_gift_card_code_to_checkout_used_gift_card(checkout, gift_card_used):
# given
assert gift_card_used.used_by_email
assert checkout.gift_cards.count() == 0
# when
add_gift_card_code_to_checkout(
checkout,
gift_card_used.used_by_email,
gift_card_used.code,
gift_card_used.currency,
)
# then
assert checkout.gift_cards.count() == 1
def test_add_gift_card_code_to_checkout_used_gift_card_invalid_user(
checkout, gift_card_used
):
# given
email = "new_user@example.com"
assert gift_card_used.used_by_email
assert gift_card_used.used_by_email != email
assert checkout.gift_cards.count() == 0
# when
# then
with pytest.raises(InvalidPromoCode):
add_gift_card_code_to_checkout(
checkout, email, gift_card_used.code, gift_card_used.currency
)
def test_remove_gift_card_code_from_checkout(checkout, gift_card):
# given
checkout.gift_cards.add(gift_card)
assert checkout.gift_cards.count() == 1
# when
remove_gift_card_code_from_checkout(checkout, gift_card.code)
# then
assert checkout.gift_cards.count() == 0
def test_remove_gift_card_code_from_checkout_no_checkout_gift_cards(
checkout, gift_card
):
# given
assert checkout.gift_cards.count() == 0
# when
remove_gift_card_code_from_checkout(checkout, gift_card.code)
# then
assert checkout.gift_cards.count() == 0
@pytest.mark.parametrize(
"period_type, period", [("years", 5), ("weeks", 1), ("months", 13), ("days", 100)]
)
def test_calculate_expiry_settings(period_type, period, site_settings):
# given
site_settings.gift_card_expiry_type = GiftCardSettingsExpiryType.EXPIRY_PERIOD
site_settings.gift_card_expiry_period_type = period_type.rstrip("s")
site_settings.gift_card_expiry_period = period
site_settings.save(
update_fields=[
"gift_card_expiry_type",
"gift_card_expiry_period_type",
"gift_card_expiry_period",
]
)
# when
expiry_date = calculate_expiry_date(site_settings)
# then
assert expiry_date == timezone.now().date() + relativedelta(**{period_type: period})
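    # Worked example (illustrative): with period_type="months" and period=13, a gift
    # card issued on 2021-01-15 expires on 2022-02-15; relativedelta simply shifts the
    # current date forward by the configured period.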
def test_calculate_expiry_settings_for_never_expire_settings(site_settings):
# given
site_settings.gift_card_expiry_type = GiftCardSettingsExpiryType.NEVER_EXPIRE
# when
expiry_date = calculate_expiry_date(site_settings)
# then
assert expiry_date is None
@patch("saleor.giftcard.utils.send_gift_card_notification")
def test_gift_cards_create(
send_notification_mock,
order,
gift_card_shippable_order_line,
gift_card_non_shippable_order_line,
site_settings,
staff_user,
):
# given
manager = get_plugins_manager()
line_1, line_2 = gift_card_shippable_order_line, gift_card_non_shippable_order_line
user_email = order.user_email
fulfillment = order.fulfillments.create(tracking_number="123")
fulfillment_line_1 = fulfillment.lines.create(
order_line=line_1,
quantity=line_1.quantity,
stock=line_1.allocations.get().stock,
)
fulfillment_line_2 = fulfillment.lines.create(
order_line=line_2,
quantity=line_2.quantity,
stock=line_2.allocations.get().stock,
)
lines_data = [
GiftCardLineData(
quantity=1,
order_line=line_1,
variant=line_1.variant,
fulfillment_line=fulfillment_line_1,
),
GiftCardLineData(
quantity=1,
order_line=line_2,
variant=line_2.variant,
fulfillment_line=fulfillment_line_2,
),
]
# when
gift_cards = gift_cards_create(
order, lines_data, site_settings, staff_user, None, manager
)
# then
assert len(gift_cards) == len(lines_data)
shippable_gift_card = gift_cards[0]
shippable_price = gift_card_shippable_order_line.unit_price_gross
assert shippable_gift_card.initial_balance == shippable_price
assert shippable_gift_card.current_balance == shippable_price
assert shippable_gift_card.created_by == order.user
assert shippable_gift_card.created_by_email == user_email
assert shippable_gift_card.expiry_date is None
assert shippable_gift_card.fulfillment_line == fulfillment_line_1
bought_event_for_shippable_card = GiftCardEvent.objects.get(
gift_card=shippable_gift_card
)
assert bought_event_for_shippable_card.user == staff_user
assert bought_event_for_shippable_card.app is None
assert bought_event_for_shippable_card.type == GiftCardEvents.BOUGHT
assert bought_event_for_shippable_card.parameters == {
"order_id": order.id,
"expiry_date": None,
}
non_shippable_gift_card = gift_cards[1]
non_shippable_price = gift_card_non_shippable_order_line.total_price_gross
assert non_shippable_gift_card.initial_balance == non_shippable_price
assert non_shippable_gift_card.current_balance == non_shippable_price
assert non_shippable_gift_card.created_by == order.user
assert non_shippable_gift_card.created_by_email == user_email
assert non_shippable_gift_card.expiry_date is None
assert non_shippable_gift_card.fulfillment_line == fulfillment_line_2
non_shippable_event = GiftCardEvent.objects.get(
gift_card=non_shippable_gift_card, type=GiftCardEvents.BOUGHT
)
assert non_shippable_event.user == staff_user
assert non_shippable_event.app is None
assert non_shippable_event.parameters == {"order_id": order.id, "expiry_date": None}
flush_post_commit_hooks()
send_notification_mock.assert_called_once_with(
staff_user,
None,
order.user,
user_email,
non_shippable_gift_card,
manager,
order.channel.slug,
resending=False,
)
@patch("saleor.giftcard.utils.send_gift_card_notification")
def test_gift_cards_create_expiry_date_set(
send_notification_mock,
order,
gift_card_shippable_order_line,
gift_card_non_shippable_order_line,
site_settings,
staff_user,
):
# given
manager = get_plugins_manager()
site_settings.gift_card_expiry_type = GiftCardSettingsExpiryType.EXPIRY_PERIOD
site_settings.gift_card_expiry_period_type = TimePeriodType.WEEK
site_settings.gift_card_expiry_period = 20
site_settings.save(
update_fields=[
"gift_card_expiry_type",
"gift_card_expiry_period_type",
"gift_card_expiry_period",
]
)
line_1 = gift_card_non_shippable_order_line
user_email = order.user_email
fulfillment = order.fulfillments.create(tracking_number="123")
fulfillment_line_1 = fulfillment.lines.create(
order_line=line_1,
quantity=line_1.quantity,
stock=line_1.allocations.get().stock,
)
lines_data = [
GiftCardLineData(
quantity=1,
order_line=line_1,
variant=line_1.variant,
fulfillment_line=fulfillment_line_1,
)
]
# when
gift_cards = gift_cards_create(
order, lines_data, site_settings, staff_user, None, manager
)
# then
assert len(gift_cards) == len(lines_data)
gift_card = gift_cards[0]
price = gift_card_non_shippable_order_line.total_price_gross
assert gift_card.initial_balance == price
assert gift_card.current_balance == price
assert gift_card.created_by == order.user
assert gift_card.created_by_email == user_email
assert gift_card.expiry_date
assert gift_card.fulfillment_line == fulfillment_line_1
event = GiftCardEvent.objects.get(gift_card=gift_card, type=GiftCardEvents.BOUGHT)
assert event.user == staff_user
assert event.app is None
assert event.parameters == {
"order_id": order.id,
"expiry_date": gift_card.expiry_date.isoformat(),
}
flush_post_commit_hooks()
send_notification_mock.assert_called_once_with(
staff_user,
None,
order.user,
user_email,
gift_card,
manager,
order.channel.slug,
resending=False,
)
@patch("saleor.giftcard.utils.send_gift_card_notification")
def test_gift_cards_create_multiple_quantity(
send_notification_mock,
order,
gift_card_non_shippable_order_line,
site_settings,
staff_user,
):
# given
manager = get_plugins_manager()
quantity = 3
gift_card_non_shippable_order_line.quantity = quantity
gift_card_non_shippable_order_line.save(update_fields=["quantity"])
fulfillment = order.fulfillments.create(tracking_number="123")
stock = gift_card_non_shippable_order_line.allocations.get().stock
fulfillment_line = fulfillment.lines.create(
order_line=gift_card_non_shippable_order_line, quantity=quantity, stock=stock
)
lines_data = [
GiftCardLineData(
quantity=quantity,
order_line=gift_card_non_shippable_order_line,
variant=gift_card_non_shippable_order_line.variant,
fulfillment_line=fulfillment_line,
)
]
# when
gift_cards = gift_cards_create(
order, lines_data, site_settings, staff_user, None, manager
)
# then
flush_post_commit_hooks()
assert len(gift_cards) == quantity
price = gift_card_non_shippable_order_line.unit_price_gross
for gift_card in gift_cards:
assert gift_card.initial_balance == price
assert gift_card.current_balance == price
assert gift_card.fulfillment_line == fulfillment_line
assert GiftCardEvent.objects.filter(type=GiftCardEvents.BOUGHT).count() == quantity
assert send_notification_mock.call_count == quantity
def test_get_gift_card_lines(
gift_card_non_shippable_order_line, gift_card_shippable_order_line, order_line
):
# given
lines = [
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
order_line,
]
# when
gift_card_lines = get_gift_card_lines(lines)
# then
assert set(gift_card_lines) == {
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
}
def test_get_gift_card_lines_no_gift_card_lines(
order_line_with_one_allocation, order_line
):
# given
lines = [order_line_with_one_allocation, order_line]
# when
gift_card_lines = get_gift_card_lines(lines)
# then
assert not gift_card_lines
def test_get_non_shippable_gift_card_lines(
gift_card_non_shippable_order_line, gift_card_shippable_order_line, order_line
):
# given
lines = [
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
order_line,
]
# when
gift_card_lines = get_non_shippable_gift_card_lines(lines)
# then
assert set(gift_card_lines) == {gift_card_non_shippable_order_line}
def test_get_non_shippable_gift_card_lines_no_gift_card_lines(
order_line_with_one_allocation, order_line
):
# given
lines = [order_line_with_one_allocation, order_line]
# when
gift_card_lines = get_gift_card_lines(lines)
# then
assert not gift_card_lines
@patch("saleor.giftcard.utils.create_fulfillments")
def test_fulfill_non_shippable_gift_cards(
create_fulfillments_mock,
order,
gift_card_shippable_order_line,
gift_card_non_shippable_order_line,
site_settings,
staff_user,
warehouse,
):
# given
manager = get_plugins_manager()
order_lines = [gift_card_shippable_order_line, gift_card_non_shippable_order_line]
# when
fulfill_non_shippable_gift_cards(
order, order_lines, site_settings, staff_user, None, manager
)
# then
fulfillment_lines_for_warehouses = {
str(warehouse.pk): [
{
"order_line": gift_card_non_shippable_order_line,
"quantity": gift_card_non_shippable_order_line.quantity,
},
]
}
create_fulfillments_mock.assert_called_once()
args, kwargs = create_fulfillments_mock.call_args
assert args[0] == staff_user
assert args[1] is None
assert args[2] == order
assert args[3] == fulfillment_lines_for_warehouses
assert args[4] == manager
assert args[5] == site_settings
assert kwargs["notify_customer"] is True
@patch("saleor.giftcard.utils.create_fulfillments")
def test_fulfill_non_shippable_gift_cards_line_with_allocation(
create_fulfillments_mock,
order,
gift_card_shippable_order_line,
gift_card_non_shippable_order_line,
site_settings,
staff_user,
warehouse,
):
# given
manager = get_plugins_manager()
order_lines = [gift_card_shippable_order_line, gift_card_non_shippable_order_line]
order = gift_card_non_shippable_order_line.order
non_shippable_variant = gift_card_non_shippable_order_line.variant
non_shippable_variant.track_inventory = True
non_shippable_variant.save(update_fields=["track_inventory"])
stock = non_shippable_variant.stocks.first()
# when
fulfill_non_shippable_gift_cards(
order, order_lines, site_settings, staff_user, None, manager
)
fulfillment_lines_for_warehouses = {
str(stock.warehouse.pk): [
{
"order_line": gift_card_non_shippable_order_line,
"quantity": gift_card_non_shippable_order_line.quantity,
},
]
}
create_fulfillments_mock.assert_called_once()
args, kwargs = create_fulfillments_mock.call_args
assert args[0] == staff_user
assert args[1] is None
assert args[2] == order
assert args[3] == fulfillment_lines_for_warehouses
assert args[4] == manager
assert args[5] == site_settings
assert kwargs["notify_customer"] is True
def test_fulfill_gift_card_lines(
staff_user,
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
site_settings,
):
# given
manager = get_plugins_manager()
order = gift_card_non_shippable_order_line.order
non_shippable_variant = gift_card_non_shippable_order_line.variant
non_shippable_variant.track_inventory = True
non_shippable_variant.save(update_fields=["track_inventory"])
lines = OrderLine.objects.filter(
pk__in=[
gift_card_non_shippable_order_line.pk,
gift_card_shippable_order_line.pk,
]
)
# when
fulfillments = fulfill_gift_card_lines(
lines, staff_user, None, order, site_settings, manager
)
# then
assert len(fulfillments) == 1
assert fulfillments[0].lines.count() == len(lines)
flush_post_commit_hooks()
gift_cards = GiftCard.objects.all()
assert gift_cards.count() == sum([line.quantity for line in lines])
shippable_gift_cards = gift_cards.filter(
product_id=gift_card_shippable_order_line.variant.product_id
)
assert len(shippable_gift_cards) == gift_card_shippable_order_line.quantity
non_shippable_gift_cards = gift_cards.filter(
product_id=gift_card_non_shippable_order_line.variant.product_id
)
assert len(non_shippable_gift_cards) == gift_card_non_shippable_order_line.quantity
for card in gift_cards:
assert card.initial_balance.amount == round(
gift_card_non_shippable_order_line.unit_price_gross.amount, 2
)
assert card.current_balance.amount == round(
gift_card_non_shippable_order_line.unit_price_gross.amount, 2
)
assert card.fulfillment_line
assert GiftCardEvent.objects.filter(gift_card=card, type=GiftCardEvents.BOUGHT)
def test_fulfill_gift_card_lines_lack_of_stock(
staff_user,
gift_card_non_shippable_order_line,
gift_card_shippable_order_line,
site_settings,
):
# given
manager = get_plugins_manager()
order = gift_card_non_shippable_order_line.order
gift_card_non_shippable_order_line.variant.stocks.all().delete()
lines = OrderLine.objects.filter(
pk__in=[
gift_card_non_shippable_order_line.pk,
gift_card_shippable_order_line.pk,
]
)
# when
with pytest.raises(ValidationError):
fulfill_gift_card_lines(lines, staff_user, None, order, site_settings, manager)
def test_deactivate_order_gift_cards(
gift_card, gift_card_expiry_date, gift_card_created_by_staff, order, staff_user
):
# given
bought_cards = [gift_card, gift_card_expiry_date]
events.gift_cards_bought_event(bought_cards, order.id, staff_user, None)
for card in [gift_card, gift_card_expiry_date, gift_card_created_by_staff]:
assert card.is_active
# when
deactivate_order_gift_cards(order.id, staff_user, None)
# then
for card in bought_cards:
card.refresh_from_db()
assert not card.is_active
assert card.events.filter(type=GiftCardEvents.DEACTIVATED)
assert gift_card_created_by_staff.is_active
assert not gift_card_created_by_staff.events.filter(type=GiftCardEvents.DEACTIVATED)
def test_deactivate_order_gift_cards_no_order_gift_cards(
gift_card, gift_card_expiry_date, gift_card_created_by_staff, order, staff_user
):
# given
cards = [gift_card, gift_card_expiry_date, gift_card_created_by_staff]
for card in cards:
assert card.is_active
# when
deactivate_order_gift_cards(order.id, staff_user, None)
# then
for card in cards:
card.refresh_from_db()
assert card.is_active
def test_order_has_gift_card_lines_true(gift_card_shippable_order_line):
order = gift_card_shippable_order_line.order
assert order_has_gift_card_lines(order) is True
def test_order_has_gift_card_lines_false(order):
assert order_has_gift_card_lines(order) is False
def test_assign_user_gift_cards(
customer_user,
gift_card,
gift_card_expiry_date,
gift_card_used,
gift_card_created_by_staff,
):
# given
card_ids = [
card.id
for card in [
gift_card,
gift_card_expiry_date,
gift_card_created_by_staff,
gift_card_used,
]
]
GiftCard.objects.filter(id__in=card_ids).update(created_by=None)
gift_card_used.used_by = None
gift_card_used.save(update_fields=["used_by"])
# when
assign_user_gift_cards(customer_user)
# then
for card in [gift_card, gift_card_expiry_date]:
card.refresh_from_db()
assert card.created_by == customer_user
gift_card_used.refresh_from_db()
assert gift_card_used.used_by == customer_user
assert not gift_card_used.created_by
def test_assign_user_gift_cards_no_gift_cards_to_assign(
customer_user, gift_card_created_by_staff
):
# given
gift_card_created_by_staff.created_by = None
gift_card_created_by_staff.save(update_fields=["created_by"])
# when
assign_user_gift_cards(customer_user)
# then
gift_card_created_by_staff.refresh_from_db()
assert not gift_card_created_by_staff.created_by
def test_is_gift_card_expired_never_expired_gift_card(gift_card):
# given
assert not gift_card.expiry_date
# when
result = is_gift_card_expired(gift_card)
# then
assert result is False
def test_is_gift_card_expired_true(gift_card):
# given
gift_card.expiry_date = date.today() - timedelta(days=1)
gift_card.save(update_fields=["expiry_date"])
# when
result = is_gift_card_expired(gift_card)
# then
assert result is True
@pytest.mark.parametrize(
"expiry_date", [timezone.now().date(), timezone.now().date() + timedelta(days=1)]
)
def test_is_gift_card_expired_false(expiry_date, gift_card):
# given
gift_card.expiry_date = expiry_date
gift_card.save(update_fields=["expiry_date"])
# when
result = is_gift_card_expired(gift_card)
# then
assert result is False
| 29.472368
| 88
| 0.726148
|
37d736a5ad2d8e9c0af9699ffd61953b92266018
| 16,635
|
py
|
Python
|
tests/flow/test_ts_createrule.py
|
elena-kolevska/RedisTimeSeries
|
7aa3e5b90e593adce60bf1506ec76d055c05f278
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | 643
|
2019-05-14T15:38:10.000Z
|
2022-03-29T01:15:49.000Z
|
tests/flow/test_ts_createrule.py
|
elena-kolevska/RedisTimeSeries
|
7aa3e5b90e593adce60bf1506ec76d055c05f278
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | 888
|
2019-05-15T10:51:07.000Z
|
2022-03-31T10:40:04.000Z
|
tests/flow/test_ts_createrule.py
|
elena-kolevska/RedisTimeSeries
|
7aa3e5b90e593adce60bf1506ec76d055c05f278
|
[
"MIT",
"Ruby",
"BSD-3-Clause"
] | 107
|
2019-05-24T11:12:39.000Z
|
2022-03-31T10:01:00.000Z
|
import math
import random
import statistics
import pytest
import redis
from RLTest import Env
from test_helper_classes import _get_series_value, calc_rule, ALLOWED_ERROR, _insert_data, \
_get_ts_info, _insert_agg_data
key_name = 'tester{abc}'
agg_key_name = '{}_agg_max_10'.format(key_name)
def test_compaction_rules(self):
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name, 'CHUNK_SIZE', '360')
assert r.execute_command('TS.CREATE', agg_key_name)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', -10)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', 0)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', 10)
start_ts = 1488823384
samples_count = 1500
_insert_data(r, key_name, start_ts, samples_count, 5)
last_ts = start_ts + samples_count + 10
r.execute_command('TS.ADD', key_name, last_ts, 5)
actual_result = r.execute_command('TS.RANGE', agg_key_name, start_ts, start_ts + samples_count)
assert len(actual_result) == samples_count / 10
info = _get_ts_info(r, key_name)
assert info.rules == [[agg_key_name.encode('ascii'), 10, b'AVG']]
def test_create_compaction_rule_with_wrong_aggregation():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', agg_key_name)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAXX', 10)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MA', 10)
def test_create_compaction_rule_without_dest_series():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAX', 10)
def test_create_compaction_rule_twice():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAX', 10)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAX', 10)
def test_create_compaction_rule_override_dest():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', 'tester2')
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAX', 10)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', 'tester2', agg_key_name, 'AGGREGATION', 'MAX', 10)
def test_create_compaction_rule_from_target():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', 'tester2')
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'MAX', 10)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', agg_key_name, 'tester2', 'AGGREGATION', 'MAX', 10)
def test_create_compaction_rule_own():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
with pytest.raises(redis.ResponseError) as excinfo:
assert r.execute_command('TS.CREATERULE', key_name, key_name, 'AGGREGATION', 'MAX', 10)
def test_create_compaction_rule_and_del_dest_series():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'AVG', 10)
assert r.delete(agg_key_name)
start_ts = 1488823384
samples_count = 1500
_insert_data(r, key_name, start_ts, samples_count, 5)
def test_std_var_func():
with Env().getClusterConnectionIfNeeded() as r:
raw_key = 'raw{abc}'
std_key = 'std_key{abc}'
var_key = 'var_key{abc}'
random_numbers = 100
random.seed(0)
items = random.sample(range(random_numbers), random_numbers)
stdev = statistics.stdev(items)
var = statistics.variance(items)
assert r.execute_command('TS.CREATE', raw_key)
assert r.execute_command('TS.CREATE', std_key)
assert r.execute_command('TS.CREATE', var_key)
assert r.execute_command('TS.CREATERULE', raw_key, std_key, "AGGREGATION", 'std.s', random_numbers)
assert r.execute_command('TS.CREATERULE', raw_key, var_key, "AGGREGATION", 'var.s', random_numbers)
for i in range(random_numbers):
r.execute_command('TS.ADD', raw_key, i, items[i])
r.execute_command('TS.ADD', raw_key, random_numbers, 0) # close time bucket
assert abs(stdev - float(r.execute_command('TS.GET', std_key)[1])) < ALLOWED_ERROR
assert abs(var - float(r.execute_command('TS.GET', var_key)[1])) < ALLOWED_ERROR
def test_delete_key():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name, 'CHUNK_SIZE', '360')
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', 10)
assert r.delete(agg_key_name)
assert _get_ts_info(r, key_name).rules == []
assert r.execute_command('TS.CREATE', agg_key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', 11)
assert r.delete(key_name)
assert _get_ts_info(r, agg_key_name).sourceKey == None
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATERULE', key_name, agg_key_name, 'AGGREGATION', 'avg', 12)
assert _get_ts_info(r, key_name).rules == [[agg_key_name.encode('ascii'), 12, b'AVG']]
def test_downsampling_current():
with Env().getClusterConnectionIfNeeded() as r:
key = 'src{a}'
agg_key = 'dest{a}'
type_list = ['', 'uncompressed']
agg_list = ['avg', 'sum', 'min', 'max', 'count', 'range', 'first', 'last', 'std.p', 'std.s', 'var.p',
'var.s'] # more
for chunk_type in type_list:
for agg_type in agg_list:
assert r.execute_command('TS.CREATE', key, chunk_type, "DUPLICATE_POLICY", "LAST")
assert r.execute_command('TS.CREATE', agg_key, chunk_type)
assert r.execute_command('TS.CREATERULE', key, agg_key, "AGGREGATION", agg_type, 10)
# present update
assert r.execute_command('TS.ADD', key, 3, 3) == 3
assert r.execute_command('TS.ADD', key, 5, 5) == 5
assert r.execute_command('TS.ADD', key, 7, 7) == 7
assert r.execute_command('TS.ADD', key, 5, 2) == 5
assert r.execute_command('TS.ADD', key, 10, 10) == 10
expected_result = r.execute_command('TS.RANGE', key, 0, '+', 'aggregation', agg_type, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 0, '+')
assert expected_result[0] == actual_result[0]
# present add
assert r.execute_command('TS.ADD', key, 11, 11) == 11
assert r.execute_command('TS.ADD', key, 15, 15) == 15
assert r.execute_command('TS.ADD', key, 14, 14) == 14
assert r.execute_command('TS.ADD', key, 20, 20) == 20
expected_result = r.execute_command('TS.RANGE', key, 0, '+', 'aggregation', agg_type, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 0, '+')
assert expected_result[0:1] == actual_result[0:1]
# present + past add
assert r.execute_command('TS.ADD', key, 23, 23) == 23
assert r.execute_command('TS.ADD', key, 15, 22) == 15
assert r.execute_command('TS.ADD', key, 27, 27) == 27
assert r.execute_command('TS.ADD', key, 23, 25) == 23
assert r.execute_command('TS.ADD', key, 30, 30) == 30
expected_result = r.execute_command('TS.RANGE', key, 0, '+', 'aggregation', agg_type, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 0, '+')
assert expected_result[0:3] == actual_result[0:3]
assert 3 == _get_ts_info(r, agg_key).total_samples
assert 11 == _get_ts_info(r, key).total_samples
r.execute_command('DEL', key)
r.execute_command('DEL', agg_key)
def test_downsampling_extensive():
with Env().getClusterConnectionIfNeeded() as r:
key = 'tester{abc}'
fromTS = 10
toTS = 10000
type_list = ['', 'uncompressed']
for chunk_type in type_list:
agg_list = ['avg', 'sum', 'min', 'max', 'count', 'range', 'first', 'last', 'std.p', 'std.s', 'var.p',
'var.s'] # more
for agg in agg_list:
agg_key = _insert_agg_data(r, key, agg, chunk_type, fromTS, toTS,
key_create_args=['DUPLICATE_POLICY', 'LAST'])
# sanity + check result have changed
expected_result1 = r.execute_command('TS.RANGE', key, fromTS, toTS, 'aggregation', agg, 10)
actual_result1 = r.execute_command('TS.RANGE', agg_key, fromTS, toTS)
assert expected_result1 == actual_result1
assert len(expected_result1) == 999
for i in range(fromTS + 5, toTS - 4, 10):
assert r.execute_command('TS.ADD', key, i, 42)
expected_result2 = r.execute_command('TS.RANGE', key, fromTS, toTS, 'aggregation', agg, 10)
actual_result2 = r.execute_command('TS.RANGE', agg_key, fromTS, toTS)
assert expected_result2 == actual_result2
# remove aggs with identical results
compare_list = ['avg', 'sum', 'min', 'range', 'std.p', 'std.s', 'var.p', 'var.s']
if agg in compare_list:
assert expected_result1 != expected_result2
assert actual_result1 != actual_result2
r.execute_command('DEL', key)
r.execute_command('DEL', agg_key)
def test_downsampling_rules(self):
"""
    Test downsampling rules - avg, min, max, count, sum with 4 keys each.
    Downsample at resolutions of:
1sec (should be the same length as the original series),
3sec (number of samples is divisible by 10),
10s (number of samples is not divisible by 10),
1000sec (series should be empty since there are not enough samples)
Insert some data and check that the length, the values and the info of the downsample series are as expected.
"""
with Env().getClusterConnectionIfNeeded() as r:
key = 'tester{abc}'
assert r.execute_command('TS.CREATE', key)
rules = ['avg', 'sum', 'count', 'max', 'min']
resolutions = [1, 3, 10, 1000]
for rule in rules:
for resolution in resolutions:
agg_key = '{}_{}_{}'.format(key, rule, resolution)
assert r.execute_command('TS.CREATE', agg_key)
assert r.execute_command('TS.CREATERULE', key, agg_key, 'AGGREGATION', rule, resolution)
start_ts = 0
samples_count = 501
end_ts = start_ts + samples_count
values = list(range(samples_count))
_insert_data(r, key, start_ts, samples_count, values)
r.execute_command('TS.ADD', key, 3000, 7.77)
for rule in rules:
for resolution in resolutions:
actual_result = r.execute_command('TS.RANGE', '{}_{}_{}'.format(key, rule, resolution),
start_ts, end_ts)
assert len(actual_result) == math.ceil(samples_count / float(resolution))
expected_result = calc_rule(rule, values, resolution)
assert _get_series_value(actual_result) == expected_result
# last time stamp should be the beginning of the last bucket
assert _get_ts_info(r, '{}_{}_{}'.format(key, rule, resolution)).last_time_stamp == \
(samples_count - 1) - (samples_count - 1) % resolution
# test for results after empty buckets
r.execute_command('TS.ADD', key, 6000, 0)
for rule in rules:
for resolution in resolutions:
actual_result = r.execute_command('TS.RANGE', '{}_{}_{}'.format(key, rule, resolution),
3000, 6000)
assert len(actual_result) == 1
assert _get_series_value(actual_result) == [7.77] or \
_get_series_value(actual_result) == [1]
def test_backfill_downsampling(self):
env = Env()
with env.getClusterConnectionIfNeeded() as r:
key = 'tester{a}'
type_list = ['', 'uncompressed']
for chunk_type in type_list:
agg_list = ['sum', 'min', 'max', 'count', 'first', 'last'] # more
for agg in agg_list:
agg_key = _insert_agg_data(r, key, agg, chunk_type, key_create_args=['DUPLICATE_POLICY', 'LAST'])
expected_result = r.execute_command('TS.RANGE', key, 10, 50, 'aggregation', agg, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 10, 50)
assert expected_result == actual_result
assert r.execute_command('TS.ADD', key, 15, 50) == 15
expected_result = r.execute_command('TS.RANGE', key, 10, 50, 'aggregation', agg, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 10, 50)
assert expected_result == actual_result
# add in latest window
                assert r.execute_command('TS.ADD', key, 1055, 50) == 1055
                assert r.execute_command('TS.ADD', key, 1053, 55) == 1053
                assert r.execute_command('TS.ADD', key, 1062, 60) == 1062
expected_result = r.execute_command('TS.RANGE', key, 10, 1060, 'aggregation', agg, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 10, 1060)
assert expected_result == actual_result
# update in latest window
                assert r.execute_command('TS.ADD', key, 1065, 65) == 1065
                assert r.execute_command('TS.ADD', key, 1066, 66) == 1066
                assert r.execute_command('TS.ADD', key, 1001, 42) == 1001
                assert r.execute_command('TS.ADD', key, 1075, 50) == 1075
expected_result = r.execute_command('TS.RANGE', key, 10, 1070, 'aggregation', agg, 10)
actual_result = r.execute_command('TS.RANGE', agg_key, 10, 1070)
env.assertEqual(expected_result, actual_result)
r.execute_command('DEL', key)
r.execute_command('DEL', agg_key)
def test_rule_timebucket_64bit(self):
Env().skipOnCluster()
with Env().getClusterConnectionIfNeeded() as r:
BELOW_32BIT_LIMIT = 2147483647
ABOVE_32BIT_LIMIT = 2147483648
r.execute_command("ts.create", 'test_key', 'RETENTION', ABOVE_32BIT_LIMIT)
r.execute_command("ts.create", 'below_32bit_limit')
r.execute_command("ts.create", 'above_32bit_limit')
r.execute_command("ts.createrule", 'test_key', 'below_32bit_limit', 'AGGREGATION', 'max', BELOW_32BIT_LIMIT)
r.execute_command("ts.createrule", 'test_key', 'above_32bit_limit', 'AGGREGATION', 'max', ABOVE_32BIT_LIMIT)
info = _get_ts_info(r, 'test_key')
assert info.rules[0][1] == BELOW_32BIT_LIMIT
assert info.rules[1][1] == ABOVE_32BIT_LIMIT
| 49.508929
| 116
| 0.62092
|
71e4e98a075c68c33cb94c262a55f4da0dad8712
| 1,928
|
py
|
Python
|
unittests/test_cpp_standards.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 80
|
2015-01-01T08:20:16.000Z
|
2020-05-03T15:58:27.000Z
|
unittests/test_cpp_standards.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 99
|
2015-01-10T09:36:43.000Z
|
2020-05-07T20:03:18.000Z
|
unittests/test_cpp_standards.py
|
RoyVorster/pygccxml
|
f487b1e26e88d521d623e6a587510b322f7d3dc7
|
[
"BSL-1.0"
] | 27
|
2015-03-12T20:00:13.000Z
|
2019-11-28T09:07:02.000Z
|
# Copyright 2014-2017 Insight Software Consortium.
# Copyright 2004-2009 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
import platform
import unittest
from . import parser_test_case
from pygccxml import parser
class Test(parser_test_case.parser_test_case_t):
def test(self):
"""
Test different compilation standards by setting cflags.
"""
# Skip this test for gccxml, this is a CastXML feature.
if "gccxml" in self.config.xml_generator:
return True
parser.parse(["cpp_standards.hpp"], self.config)
if platform.system() != 'Windows':
self.config.cflags = "-std=c++98"
parser.parse(["cpp_standards.hpp"], self.config)
self.config.cflags = "-std=c++03"
parser.parse(["cpp_standards.hpp"], self.config)
self.config.cflags = "-std=c++11"
parser.parse(["cpp_standards.hpp"], self.config)
# This is broken with llvm 3.6.2 (the one from homebrew)
            # It should work with newer llvms, but I keep the test disabled
# See https://llvm.org/bugs/show_bug.cgi?id=24872
# self.config.cflags = "-std=c++14"
# parser.parse(["cpp_standards.hpp"], self.config)
# Same as above
# self.config.cflags = "-std=c++1z"
# parser.parse(["cpp_standards.hpp"], self.config)
# Pass down a flag that does not exist.
# This should raise an exception.
self.config.cflags = "-std=c++00"
self.assertRaises(
RuntimeError,
lambda: parser.parse(["cpp_standards.hpp"], self.config))
def create_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run(create_suite())
if __name__ == "__main__":
run_suite()
| 28.352941
| 70
| 0.635892
|
fe3b80424e8c9107df15af184d8c210dbbc8abd5
| 4,036
|
py
|
Python
|
examples/plot_matching_outlier.py
|
atong01/unbalanced_gromov_wasserstein
|
7675d8512d81007cc29e7564f1f6784d81f39740
|
[
"MIT"
] | 1
|
2020-09-11T02:05:11.000Z
|
2020-09-11T02:05:11.000Z
|
examples/plot_matching_outlier.py
|
dpduanpu/unbalanced_gromov_wasserstein
|
8f37685e801ae65bda9fc421070b92e414bc2980
|
[
"MIT"
] | null | null | null |
examples/plot_matching_outlier.py
|
dpduanpu/unbalanced_gromov_wasserstein
|
8f37685e801ae65bda9fc421070b92e414bc2980
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from solver.utils_numpy import euclid_dist
from ot.gromov import gromov_wasserstein
from solver.tlb_kl_sinkhorn_solver import TLBSinkhornSolver
path = os.getcwd() + "/output"
if not os.path.isdir(path):
os.mkdir(path)
path = path + "/plots"
if not os.path.isdir(path):
os.mkdir(path)
torch.set_default_tensor_type(torch.cuda.FloatTensor)
def generate_data(nsample, nout, noise, normalize=False):
z, w = np.linspace(0, np.pi, nsample), np.linspace(0., 1., nout)
x = np.transpose(np.stack((np.cos(z), np.sin(z))))
y = np.transpose(np.stack((1 - np.cos(z), 1 - np.sin(z) - .5)))
outlier = np.transpose(np.stack((-1 - w, -w)))
x = np.concatenate((x, outlier))
# Generate weights
if normalize:
a, b = np.ones(x.shape[0]) / x.shape[0], np.ones(y.shape[0]) / y.shape[0]
else:
a, b = np.ones(x.shape[0]) / x.shape[0], np.ones(y.shape[0]) / x.shape[0]
return a, x + noise * np.random.normal(size=x.shape), b, y + noise * np.random.normal(size=y.shape)
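# Descriptive note: generate_data returns (a, x, b, y) where x holds nsample + nout
# points (a half-circle arc plus an outlier segment) and y holds nsample points; with
# normalize=True both weight vectors a and b sum to one.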
def plot_density_matching(pi, a, x, b, y, ids, alpha, linewidth, fontsize, ftitle='', fname=None):
n1, n2 = b.shape[0], a.shape[0] - b.shape[0]
marg1, marg2 = np.sum(pi, axis=1), np.sum(pi, axis=0)
plt.figure(figsize=(6., 6.))
plt.scatter(x[:n1, 0], x[:n1, 1], c='b', s=(marg1 / a)[:n1] ** 2 * 25., zorder=1)
plt.scatter(x[n1:, 0], x[n1:, 1], c='c', s=(marg1 / a)[n1:] ** 2 * 25., zorder=1)
plt.scatter(y[:, 0], y[:, 1], c='r', s=(marg2 / b) ** 2 * 25., zorder=1)
# Plot argmax of coupling
for i in ids:
ids = (-pi[i, :]).argsort()[:5]
for j in ids:
w = pi[i, j] / a[i]
t, u = [x[i][0], y[j][0]], [x[i][1], y[j][1]]
plt.plot(t, u, c='k', alpha=w * alpha, linewidth=linewidth, zorder=0)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.xlim(-2.3, 2.3)
plt.ylim(-1.5, 1.5)
# plt.title(ftitle, fontsize=fontsize)
if fname is not None:
plt.savefig(fname)
if __name__ == '__main__':
dim = 2
rho = 0.01
eps = 0.001
nsample, nout = 300, 50
compute_balanced = False
solver = TLBSinkhornSolver(nits=500, nits_sinkhorn=1000, gradient=False, tol=1e-3, tol_sinkhorn=1e-3)
ids = np.concatenate((np.arange(start=0, stop=nsample, step=10),
np.arange(start=nsample, stop=nsample + nout, step=10)))
for noise in [0.1]:
print(f"NOISE = {noise}")
a, x, b, y = generate_data(nsample=nsample, nout=nout, noise=noise, normalize=True)
# Generate costs and transport plan
Cx, Cy = euclid_dist(x, x), euclid_dist(y, y)
if compute_balanced:
pi_b = gromov_wasserstein(Cx, Cy, a, b, loss_fun='square_loss')
plot_density_matching(pi_b, a, x, b, y, ids, alpha=1., linewidth=0.5, fontsize=16,
ftitle='Balanced GW matching')
plt.legend()
plt.savefig(f"matching_outlier_balanced_noise{noise}.png")
plt.show()
Cx, Cy = torch.from_numpy(Cx).cuda(), torch.from_numpy(Cy).cuda()
for rho, eps in [(0.01, 0.001), (1.0, 0.001)]:
print(f" PARAMS = {rho, eps}")
a, b = torch.from_numpy(a).cuda(), torch.from_numpy(b).cuda()
pi, _ = solver.tlb_sinkhorn(a, Cx, b, Cy, rho=rho, eps=eps, init=None)
print(f"Sum of transport plans = {pi.sum().item()}")
# Plot matchings between measures
a, b = a.cpu().data.numpy(), b.cpu().data.numpy()
pi = pi.cpu().data.numpy()
print(f"Total mass of plan is {np.sum(pi)}")
plot_density_matching(pi, a, x, b, y, ids, alpha=1., linewidth=1., fontsize=16,
                                  ftitle=f'Unbalanced GW matching, $\\rho$={rho}, $\\epsilon$={eps}', fname=None)
plt.legend()
plt.savefig(f"matching_outlier_unbalanced_noise{noise}_rho{rho}_eps{eps}.png")
plt.show()
| 39.184466
| 112
| 0.573588
|
e329c69d7dcb64ff9e9e4dab30f4c65f9938d461
| 91,992
|
py
|
Python
|
scalyr_agent/builtin_monitors/kubernetes_monitor.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/builtin_monitors/kubernetes_monitor.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/builtin_monitors/kubernetes_monitor.py
|
echee2/scalyr-agent-2
|
da7d168260b94dc95aedb5ae0dca03165e55cb02
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# author: Imron Alston <imron@scalyr.com>
__author__ = 'imron@scalyr.com'
import datetime
import docker
import fnmatch
import traceback
import logging
import os
import re
import random
import socket
import stat
from string import Template
import struct
import sys
import time
import threading
from scalyr_agent import ScalyrMonitor, define_config_option, define_metric
import scalyr_agent.util as scalyr_util
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.json_lib import JsonObject
from scalyr_agent.json_lib import JsonConversionException, JsonMissingFieldException
from scalyr_agent.log_watcher import LogWatcher
from scalyr_agent.monitor_utils.server_processors import LineRequestParser
from scalyr_agent.monitor_utils.server_processors import RequestSizeExceeded
from scalyr_agent.monitor_utils.server_processors import RequestStream
from scalyr_agent.monitor_utils.k8s import KubernetesApi, KubeletApi, KubeletApiException, KubernetesCache, PodInfo, DockerMetricFetcher
from scalyr_agent.util import StoppableThread
from scalyr_agent.util import RunState
from requests.packages.urllib3.exceptions import ProtocolError
global_log = scalyr_logging.getLogger(__name__)
__monitor__ = __name__
define_config_option(__monitor__, 'module',
'Always ``scalyr_agent.builtin_monitors.kubernetes_monitor``',
convert_to=str, required_option=True)
define_config_option( __monitor__, 'container_name',
'Optional (defaults to None). Defines a regular expression that matches the name given to the '
'container running the scalyr-agent.\n'
'If this is None, the scalyr agent will look for a container running /usr/sbin/scalyr-agent-2 as the main process.\n',
convert_to=str, default=None)
define_config_option( __monitor__, 'container_check_interval',
'Optional (defaults to 5). How often (in seconds) to check if containers have been started or stopped.',
convert_to=int, default=5)
define_config_option( __monitor__, 'api_socket',
'Optional (defaults to /var/scalyr/docker.sock). Defines the unix socket used to communicate with '
'the docker API. WARNING, if you have `mode` set to `syslog`, you must also set the '
'`docker_api_socket` configuration option in the syslog monitor to this same value\n'
'Note: You need to map the host\'s /run/docker.sock to the same value as specified here, using the -v parameter, e.g.\n'
'\tdocker run -v /run/docker.sock:/var/scalyr/docker.sock ...',
convert_to=str, default='/var/scalyr/docker.sock')
define_config_option( __monitor__, 'docker_api_version',
'Optional (defaults to \'auto\'). The version of the Docker API to use. WARNING, if you have '
'`mode` set to `syslog`, you must also set the `docker_api_version` configuration option in the '
'syslog monitor to this same value\n',
convert_to=str, default='auto')
define_config_option( __monitor__, 'docker_log_prefix',
'Optional (defaults to docker). Prefix added to the start of all docker logs. ',
convert_to=str, default='docker')
define_config_option( __monitor__, 'docker_max_parallel_stats',
'Optional (defaults to 20). Maximum stats requests to issue in parallel when retrieving container '
'metrics using the Docker API.', convert_to=int, default=20)
define_config_option( __monitor__, 'max_previous_lines',
'Optional (defaults to 5000). The maximum number of lines to read backwards from the end of the stdout/stderr logs\n'
                      'when starting to log a container\'s stdout/stderr to find the last line that was sent to Scalyr.',
convert_to=int, default=5000)
define_config_option( __monitor__, 'readback_buffer_size',
'Optional (defaults to 5k). The maximum number of bytes to read backwards from the end of any log files on disk\n'
                      'when starting to log a container\'s stdout/stderr. This is used to find the most recent timestamp '
                      'logged to file that was sent to Scalyr.',
convert_to=int, default=5*1024)
define_config_option( __monitor__, 'log_mode',
'Optional (defaults to "docker_api"). Determine which method is used to gather logs from the '
'local containers. If "docker_api", then this agent will use the docker API to contact the local '
'containers and pull logs from them. If "syslog", then this agent expects the other containers '
'to push logs to this one using the syslog Docker log plugin. Currently, "syslog" is the '
'preferred method due to bugs/issues found with the docker API. It is not the default to protect '
'legacy behavior.\n',
convert_to=str, default="docker_api")
define_config_option( __monitor__, 'metrics_only',
'Optional (defaults to False). If true, the docker monitor will only log docker metrics and not any other information '
'about running containers.\n',
convert_to=bool, default=False)
define_config_option( __monitor__, 'container_globs',
'Optional (defaults to None). A list of glob patterns for container names. Only containers whose names '
'match one of the glob patterns will be monitored.',
default=None)
define_config_option( __monitor__, 'report_container_metrics',
'Optional (defaults to True). If true, metrics will be collected from the container and reported '
'to Scalyr. Note, metrics are only collected from those containers whose logs are being collected',
convert_to=bool, default=True)
define_config_option( __monitor__, 'report_k8s_metrics',
'Optional (defaults to False). If true and report_container_metrics is true, metrics will be '
'collected from Kubernetes and reported to Scalyr.', convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_ignore_namespaces',
'Optional (defaults to "kube-system"). A comma-delimited list of the namespaces whose pods\' '
'logs should not be collected and sent to Scalyr.', convert_to=str, default="kube-system")
define_config_option( __monitor__, 'k8s_ignore_pod_sandboxes',
'Optional (defaults to True). If True then all containers with the label '
'`io.kubernetes.docker.type` equal to `podsandbox` are excluded from the '
'logs being collected.', convert_to=bool, default=True)
define_config_option( __monitor__, 'k8s_include_all_containers',
'Optional (defaults to True). If True, all containers in all pods will be monitored by the kubernetes monitor '
'unless they have an include: false or exclude: true annotation. '
'If false, only pods/containers with an include:true or exclude:false annotation '
'will be monitored. See documentation on annotations for further detail.', convert_to=bool, default=True)
define_config_option( __monitor__, 'k8s_use_v2_attributes',
'Optional (defaults to False). If True, will use v2 version of attribute names instead of '
'the names used with the original release of this monitor. This is a breaking change, so it could '
'break searches / alerts if you rely on the old names.', convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_use_v1_and_v2_attributes',
'Optional (defaults to False). If True, send attributes using both the v1 and v2 versions of their '
'names. This may be used to fix breakages when you relied on the v1 attribute names.',
convert_to=bool, default=False)
define_config_option( __monitor__, 'k8s_api_url',
'Optional (defaults to "https://kubernetes.default"). The URL for the Kubernetes API server for '
'this cluster.', convert_to=str, default='https://kubernetes.default')
define_config_option( __monitor__, 'k8s_cache_expiry_secs',
'Optional (defaults to 30). The amount of time to wait between fully updating the k8s cache from the k8s api. '
'Increase this value if you want less network traffic from querying the k8s api. Decrease this value if you '
'want dynamic updates to annotation configuration values to be processed more quickly.',
convert_to=int, default=30)
define_config_option( __monitor__, 'k8s_cache_purge_secs',
'Optional (defaults to 300). The number of seconds to wait before purging unused items from the k8s cache',
convert_to=int, default=300)
define_config_option( __monitor__, 'k8s_cache_init_abort_delay',
'Optional (defaults to 20). The number of seconds to wait for initialization of the kubernetes cache before aborting '
'the kubernetes_monitor.',
convert_to=int, default=20)
define_config_option( __monitor__, 'k8s_parse_json',
'Optional (defaults to True). If True, the log files will be parsed as json before uploading to the server '
'to extract log and timestamp fields. If False, the raw json will be uploaded to Scalyr.',
convert_to=bool, default=True)
define_config_option( __monitor__, 'verify_k8s_api_queries',
'Optional (defaults to True). If true, then the ssl connection for all queries to the k8s API will be verified using '
'the ca.crt certificate found in the service account directory. If false, no verification will be performed. '
'This is useful for older k8s clusters where certificate verification can fail.',
convert_to=bool, default=True)
define_config_option( __monitor__, 'gather_k8s_pod_info',
'Optional (defaults to False). If true, then every gather_sample interval, metrics will be collected '
'from the docker and k8s APIs showing all discovered containers and pods. This is mostly a debugging aid '
'and there are performance implications to always leaving this enabled', convert_to=bool, default=False)
define_config_option( __monitor__, 'include_daemonsets_as_deployments',
'Deprecated',
convert_to=bool, default=True)
# for now, always log timestamps to help prevent a race condition
#define_config_option( __monitor__, 'log_timestamps',
# 'Optional (defaults to False). If true, stdout/stderr logs will contain docker timestamps at the beginning of the line\n',
# convert_to=bool, default=False)
define_metric( __monitor__, "docker.net.rx_bytes", "Total received bytes on the network interface", cumulative=True, unit="bytes", category="Network" )
define_metric( __monitor__, "docker.net.rx_dropped", "Total receive packets dropped on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.rx_errors", "Total receive errors on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.rx_packets", "Total received packets on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_bytes", "Total transmitted bytes on the network interface", cumulative=True, unit="bytes", category="Network" )
define_metric( __monitor__, "docker.net.tx_dropped", "Total transmitted packets dropped on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_errors", "Total transmission errors on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.net.tx_packets", "Total packets transmitted on the network interface", cumulative=True, category="Network" )
define_metric( __monitor__, "docker.mem.stat.active_anon", "The number of bytes of active memory backed by anonymous pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.active_file", "The number of bytes of active memory backed by files, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.cache", "The number of bytes used for the cache, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.hierarchical_memory_limit", "The memory limit in bytes for the container.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.inactive_anon", "The number of bytes of inactive memory in anonymous pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.inactive_file", "The number of bytes of inactive memory in file pages, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.mapped_file", "The number of bytes of mapped files, excluding sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgfault", "The total number of page faults, excluding sub-cgroups.", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgmajfault", "The number of major page faults, excluding sub-cgroups", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgpgin", "The number of charging events, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.pgpgout", "The number of uncharging events, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.rss", "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.rss_huge", "The number of bytes of anonymous transparent hugepages, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.unevictable", "The number of bytes of memory that cannot be reclaimed (mlocked etc), excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.writeback", "The number of bytes being written back to disk, excluding sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_active_anon", "The number of bytes of active memory backed by anonymous pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_active_file", "The number of bytes of active memory backed by files, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_cache", "The number of bytes used for the cache, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_inactive_anon", "The number of bytes of inactive memory in anonymous pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_inactive_file", "The number of bytes of inactive memory in file pages, including sub-cgroups.", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_mapped_file", "The number of bytes of mapped files, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgfault", "The total number of page faults, including sub-cgroups.", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgmajfault","The number of major page faults, including sub-cgroups", cumulative=True, category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgpgin", "The number of charging events, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_pgpgout", "The number of uncharging events, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_rss", "The number of bytes of anonymous and swap cache memory (includes transparent hugepages), including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_rss_huge", "The number of bytes of anonymous transparent hugepages, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_unevictable", "The number of bytes of memory that cannot be reclaimed (mlocked etc), including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.stat.total_writeback", "The number of bytes being written back to disk, including sub-cgroups", category="Memory" )
define_metric( __monitor__, "docker.mem.max_usage", "The max amount of memory used by container in bytes.", unit="bytes", category="Memory" )
define_metric( __monitor__, "docker.mem.usage", "The current number of bytes used for memory including cache.", unit="bytes", category="Memory" )
define_metric( __monitor__, "docker.mem.fail_cnt", "The number of times the container hit its memory limit", category="Memory" )
define_metric( __monitor__, "docker.mem.limit", "The memory limit for the container in bytes.", unit="bytes", category="Memory")
define_metric( __monitor__, "docker.cpu.usage", "Total CPU consumed by container in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.system_cpu_usage", "Total CPU consumed by container in kernel mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.usage_in_usermode", "Total CPU consumed by tasks of the cgroup in user mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.total_usage", "Total CPU consumed by tasks of the cgroup in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.usage_in_kernelmode", "Total CPU consumed by tasks of the cgroup in kernel mode in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.periods", "The number of periods with throttling active.", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.throttled_periods", "The number of periods where the container hit its throttling limit", cumulative=True, category="CPU" )
define_metric( __monitor__, "docker.cpu.throttling.throttled_time", "The aggregate amount of time the container was throttled in nanoseconds", cumulative=True, category="CPU" )
define_metric( __monitor__, "k8s.pod.network.rx_bytes", "The total received bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.rx_errors", "The total received errors on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.tx_bytes", "The total transmitted bytes on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.pod.network.tx_errors", "The total transmission errors on a pod", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.rx_bytes", "The total received bytes on a node", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.rx_errors", "The total received errors on a node", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.tx_bytes", "The total transmitted bytes on a node", cumulative=True, category="Network" )
define_metric( __monitor__, "k8s.node.network.tx_errors", "The total transmission errors on a node", cumulative=True, category="Network" )
# A mapping of k8s controller kinds to the appropriate field name
# passed to the scalyr server for metrics that originate from pods
# controlled by that object. See #API-62
_CONTROLLER_KEYS = {
'CronJob' : 'k8s-cron-job',
'DaemonSet' : 'k8s-daemon-set',
'Deployment' : 'k8s-deployment',
'Job' : 'k8s-job',
'ReplicaSet': 'k8s-replica-set',
'ReplicationController': 'k8s-replication-controller',
'StatefulSet': 'k8s-stateful-set'
}
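# For example (illustrative): metrics originating from a pod that is controlled by a
# Deployment would, under the mapping above, be reported to the Scalyr server with the
# 'k8s-deployment' field carrying that controller's name.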
class K8sInitException( Exception ):
""" Wrapper exception to indicate when the monitor failed to start due to
a problem with initializing the k8s cache
"""
class WrappedStreamResponse( object ):
""" Wrapper for generator returned by docker.Client._stream_helper
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response, decode ):
self.client = client
self.response = response
self.decode = decode
def __iter__( self ):
for item in super( DockerClient, self.client )._stream_helper( self.response, self.decode ):
yield item
class WrappedRawResponse( object ):
""" Wrapper for generator returned by docker.Client._stream_raw_result
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response ):
self.client = client
self.response = response
def __iter__( self ):
for item in super( DockerClient, self.client )._stream_raw_result( self.response ):
yield item
class WrappedMultiplexedStreamResponse( object ):
""" Wrapper for generator returned by docker.Client._multiplexed_response_stream_helper
that gives us access to the response, and therefore the socket, so that
we can shutdown the socket from another thread if needed
"""
def __init__( self, client, response ):
self.client = client
self.response = response
def __iter__( self ):
for item in super( DockerClient, self.client )._multiplexed_response_stream_helper( self.response ):
yield item
class DockerClient( docker.Client ):
""" Wrapper for docker.Client to return 'wrapped' versions of streamed responses
so that we can have access to the response object, which allows us to get the
socket in use, and shutdown the blocked socket from another thread (e.g. upon
shutdown)
"""
def _stream_helper( self, response, decode=False ):
return WrappedStreamResponse( self, response, decode )
def _stream_raw_result( self, response ):
return WrappedRawResponse( self, response )
def _multiplexed_response_stream_helper( self, response ):
return WrappedMultiplexedStreamResponse( self, response )
def _get_raw_response_socket(self, response):
if response.raw._fp.fp:
return super( DockerClient, self )._get_raw_response_socket( response )
return None
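# A minimal usage sketch of the wrapped client (mirroring how this monitor constructs it
# further below; the socket path shown here is just the configured default):
#
#   client = DockerClient( base_url='unix:/%s' % '/var/scalyr/docker.sock', version='auto' )
#   for container in client.containers():
#       cid, names = container['Id'], container['Names']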
def _split_datetime_from_line( line ):
"""Docker timestamps are in RFC3339 format: 2015-08-03T09:12:43.143757463Z, with everything up to the first space
being the timestamp.
"""
log_line = line
dt = datetime.datetime.utcnow()
pos = line.find( ' ' )
if pos > 0:
dt = scalyr_util.rfc3339_to_datetime( line[0:pos] )
log_line = line[pos+1:]
return (dt, log_line)
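# For example (illustrative): given the raw docker log line
#   '2015-08-03T09:12:43.143757463Z Starting nginx...'
# _split_datetime_from_line returns the datetime parsed from the RFC3339 prefix together with
# the remaining message 'Starting nginx...'. If no space is found, the current UTC time is
# returned along with the unmodified line.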
def _get_short_cid( container_id ):
"""returns a shortened container id. Useful for logging, where using a full length container id
is not necessary and would just add noise to the log.
The shortened container id will contain enough information to uniquely
identify the container for most situations. Note: the returned value
should never be used as a key in a dict for containers because there is
always the remote possibility of a conflict (given a large enough number
of containers).
"""
# return the first 8 chars of the container id.
# we don't need to check for length because even if len( container_id ) < 8
# it's still valid to slice beyond the end of a string. See:
# https://docs.python.org/2/reference/expressions.html#slicings
return container_id[:8]
def _ignore_old_dead_container( container, created_before=None ):
"""
Returns True or False to determine whether we should ignore the
logs for a dead container, depending on whether the create time
of the container is before a certain threshold time (specified in
seconds since the epoch).
If the container was created before the threshold time, then the
container logs will be ignored.
Otherwise the logs of the dead container will be uploaded.
"""
# check for recently finished containers
if created_before is not None:
state = container.get( 'State', {} )
#ignore any that are finished and that are also too old
if state != 'running':
created = container.get( 'Created', 0 ) # default to a long time ago
if created < created_before:
return True
return False
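# For example (illustrative): with created_before=1500000000, a container whose 'State' is not
# 'running' and whose 'Created' timestamp is 1499999000 is ignored, while a dead container
# created at 1500000100 still has its logs uploaded.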
def _get_containers(client, ignore_container=None, restrict_to_container=None, logger=None,
only_running_containers=True, running_or_created_after=None, glob_list=None, include_log_path=False, k8s_cache=None,
k8s_include_by_default=True, k8s_namespaces_to_exclude=None, ignore_pod_sandboxes=True, current_time=None):
"""Queries the Docker API and returns a dict of running containers that maps container id to container name, and other info
@param client: A docker.Client object
@param ignore_container: String, a single container id to exclude from the results (useful for ignoring the scalyr_agent container)
@param restrict_to_container: String, a single container id that will be the only returned result
@param logger: scalyr_logging.Logger. Allows the caller to write logging output to a specific logger. If None the default agent.log
logger is used.
@param only_running_containers: Boolean. If true, will only return currently running containers
@param running_or_created_after: Unix timestamp. If specified, the results will include any currently running containers *and* any
dead containers that were created after the specified time. Used to pick up short-lived containers.
@param glob_list: List of strings. Glob patterns that limit results to containers whose names match one of the globs
@param include_log_path: Boolean. If true include the path to the raw log file on disk as part of the extra info mapped to the container id.
@param k8s_cache: KubernetesCache. If not None, k8s information (if it exists) for the container will be added as part of the extra info mapped to the container id
@param k8s_include_by_default: Boolean. If True, then all k8s containers are included by default, unless an include/exclude annotation excludes them.
If False, then all k8s containers are excluded by default, unless an include/exclude annotation includes them.
@param k8s_namespaces_to_exclude: List. The namespaces whose containers should be excluded.
@param ignore_pod_sandboxes: Boolean. If True then any k8s pod sandbox containers are ignored from the list of monitored containers
@param current_time: Timestamp since the epoch.
"""
if logger is None:
logger = global_log
k8s_labels = {
'pod_uid': 'io.kubernetes.pod.uid',
'pod_name': 'io.kubernetes.pod.name',
'pod_namespace': 'io.kubernetes.pod.namespace',
'k8s_container_name': 'io.kubernetes.container.name'
}
if running_or_created_after is not None:
only_running_containers=False
result = {}
try:
filters = {"id": restrict_to_container} if restrict_to_container is not None else None
response = client.containers(filters=filters, all=not only_running_containers)
for container in response:
cid = container['Id']
short_cid = _get_short_cid( cid )
if ignore_container is not None and cid == ignore_container:
continue
# Note we need to *include* results that were created after the 'running_or_created_after' time.
# that means we need to *ignore* any containers created before that
# hence the reason 'created_before' is assigned a value named '...created_after'
if _ignore_old_dead_container( container, created_before=running_or_created_after ):
continue
if len( container['Names'] ) > 0:
name = container['Names'][0].lstrip('/')
# ignore any pod sandbox containers
if ignore_pod_sandboxes:
container_type = container.get( 'Labels', {} ).get( 'io.kubernetes.docker.type', '' )
if container_type == 'podsandbox':
continue
add_container = True
if glob_list:
add_container = False
for glob in glob_list:
if fnmatch.fnmatch( name, glob ):
add_container = True
break
if add_container:
log_path = None
k8s_info = None
status = None
if include_log_path or k8s_cache is not None:
try:
info = client.inspect_container( cid )
log_path = info['LogPath'] if include_log_path and 'LogPath' in info else None
if not only_running_containers:
status = info['State']['Status']
if k8s_cache is not None:
config = info.get('Config', {} )
labels = config.get( 'Labels', {} )
k8s_info = {}
missing_field = False
for key, label in k8s_labels.iteritems():
value = labels.get( label )
if value:
k8s_info[key] = value
else:
missing_field = True
logger.warn( "Missing kubernetes label '%s' in container %s" % (label, short_cid), limit_once_per_x_secs=300,limit_key="docker-inspect-k8s-%s" % short_cid)
if missing_field:
logger.log( scalyr_logging.DEBUG_LEVEL_1, "Container Labels %s" % (scalyr_util.json_encode(labels)), limit_once_per_x_secs=300,limit_key="docker-inspect-container-dump-%s" % short_cid)
if 'pod_name' in k8s_info and 'pod_namespace' in k8s_info:
if k8s_namespaces_to_exclude is not None and k8s_info['pod_namespace'] in k8s_namespaces_to_exclude:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Excluding container '%s' based on excluded namespaces" % short_cid)
continue
pod = k8s_cache.pod( k8s_info['pod_namespace'], k8s_info['pod_name'], current_time )
if pod:
k8s_info['pod_info'] = pod
k8s_container = k8s_info.get( 'k8s_container_name', None )
# check to see if we should exclude this container
default_exclude = not k8s_include_by_default
exclude = pod.exclude_pod( container_name=k8s_container, default=default_exclude)
if exclude:
if pod.annotations:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Excluding container '%s' based on pod annotations, %s" % (short_cid, str(pod.annotations)) )
continue
# add a debug message if containers are excluded by default but this container is included
if default_exclude and not exclude:
logger.log( scalyr_logging.DEBUG_LEVEL_2, "Including container '%s' based on pod annotations, %s" % (short_cid, str(pod.annotations)) )
except Exception, e:
logger.error("Error inspecting container '%s'" % cid, limit_once_per_x_secs=300,limit_key="docker-api-inspect")
result[cid] = {'name': name, 'log_path': log_path }
if status:
result[cid]['status'] = status
if k8s_info:
result[cid]['k8s_info'] = k8s_info
else:
result[cid] = {'name': cid, 'log_path': None}
except Exception, e: # container querying failed
logger.error("Error querying running containers", limit_once_per_x_secs=300,
limit_key='docker-api-running-containers' )
result = None
return result
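# A sketch of the structure returned by _get_containers() (container ids, names and paths are
# illustrative; 'status' and 'k8s_info' are only present when available):
#
#   {
#       '<container id>': {
#           'name': 'k8s_nginx_nginx-2370548977-tjnxf_default_...',
#           'log_path': '/var/lib/docker/containers/<container id>/<container id>-json.log',
#           'k8s_info': {
#               'pod_name': 'nginx-2370548977-tjnxf',
#               'pod_namespace': 'default',
#               'pod_uid': '<uid>',
#               'k8s_container_name': 'nginx',
#               'pod_info': <PodInfo object from the k8s cache>,
#           },
#       },
#   }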
class ContainerChecker( StoppableThread ):
"""
Monitors containers to check when they start and stop running.
"""
def __init__( self, config, logger, socket_file, docker_api_version, host_hostname, data_path, log_path,
include_all, include_controller_info, namespaces_to_ignore,
ignore_pod_sandboxes ):
self._config = config
self._logger = logger
self.__delay = self._config.get( 'container_check_interval' )
self.__log_prefix = self._config.get( 'docker_log_prefix' )
self.__name = self._config.get( 'container_name' )
self.__use_v2_attributes = self._config.get('k8s_use_v2_attributes')
self.__use_v1_and_v2_attributes = self._config.get('k8s_use_v1_and_v2_attributes')
self.__parse_json = self._config.get( 'k8s_parse_json' )
self.__socket_file = socket_file
self.__docker_api_version = docker_api_version
self.__client = None
self.container_id = None
self.__log_path = log_path
self.__host_hostname = host_hostname
self.__readback_buffer_size = self._config.get( 'readback_buffer_size' )
self.__glob_list = config.get( 'container_globs' )
# The namespace whose logs we should not collect.
self.__namespaces_to_ignore = namespaces_to_ignore
self.__ignore_pod_sandboxes = ignore_pod_sandboxes
# This is currently an experimental feature. Including controller information for every event uploaded about
# a pod (cluster name, controller name, controller labels)
self.__include_controller_info = include_controller_info
self.containers = {}
self.__include_all = include_all
self.__k8s = None
self.__k8s_cache_expiry_secs = self._config.get( 'k8s_cache_expiry_secs' )
self.__k8s_cache_purge_secs = self._config.get( 'k8s_cache_purge_secs' )
self.__k8s_cache_init_abort_delay = self._config.get( 'k8s_cache_init_abort_delay' )
self.k8s_cache = None
self.__log_watcher = None
self.__module = None
self.__start_time = time.time()
self.__thread = StoppableThread( target=self.check_containers, name="Container Checker" )
def start( self ):
try:
k8s_api_url = self._config.get('k8s_api_url')
if self._config.get( 'verify_k8s_api_queries' ):
self.__k8s = KubernetesApi(k8s_api_url=k8s_api_url)
else:
self.__k8s = KubernetesApi( ca_file=None, k8s_api_url=k8s_api_url)
self.__client = DockerClient( base_url=('unix:/%s'%self.__socket_file), version=self.__docker_api_version )
self.container_id = self.__get_scalyr_container_id( self.__client, self.__name )
# create the k8s cache
self.k8s_cache = KubernetesCache( self.__k8s, self._logger,
cache_expiry_secs=self.__k8s_cache_expiry_secs,
cache_purge_secs=self.__k8s_cache_purge_secs,
namespaces_to_ignore=self.__namespaces_to_ignore )
delay = 0.5
message_delay = 5
start_time = time.time()
message_time = start_time
abort = False
# wait until the k8s_cache is initialized before aborting
while not self.k8s_cache.is_initialized():
time.sleep( delay )
current_time = time.time()
# see if we need to print a message
elapsed = current_time - message_time
if elapsed > message_delay:
self._logger.log(scalyr_logging.DEBUG_LEVEL_0, 'start() - waiting for Kubernetes cache to be initialized' )
message_time = current_time
# see if we need to abort the monitor because we've been waiting too long for init
elapsed = current_time - start_time
if elapsed > self.__k8s_cache_init_abort_delay:
abort = True
break
if abort:
raise K8sInitException( "Unable to initialize kubernetes cache" )
# check to see if the user has manually specified a cluster name, and if so then
# force enable 'Starbuck' features
if self.k8s_cache.get_cluster_name() is not None:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "ContainerChecker - cluster name detected, enabling v2 attributes and controller information" )
self.__use_v2_attributes = True
self.__include_controller_info = True
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Attempting to retrieve list of containers:' )
self.containers = _get_containers(self.__client, ignore_container=self.container_id,
glob_list=self.__glob_list, include_log_path=True,
k8s_cache=self.k8s_cache, k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
# if querying the docker api fails, set the container list to empty
if self.containers is None:
self.containers = {}
self.raw_logs = []
self.docker_logs = self.__get_docker_logs( self.containers, self.k8s_cache )
#create and start the DockerLoggers
self.__start_docker_logs( self.docker_logs )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Initialization complete. Starting k8s monitor for Scalyr" )
self.__thread.start()
except K8sInitException, e:
global_log.warn( "Failed to start container checker - %s. Aborting kubernetes_monitor" % (str(e)) )
raise
except Exception, e:
global_log.warn( "Failed to start container checker - %s\n%s" % (str(e), traceback.format_exc() ))
def stop( self, wait_on_join=True, join_timeout=5 ):
self.__thread.stop( wait_on_join=wait_on_join, join_timeout=join_timeout )
#stop the DockerLoggers
for logger in self.raw_logs:
path = logger['log_config']['path']
if self.__log_watcher:
self.__log_watcher.remove_log_path( self.__module.module_name, path )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Stopping %s" % (path) )
self.raw_logs = []
def get_k8s_data( self ):
""" Convenience wrapper to query and process all pods
and pods retrieved by the k8s API.
A filter is used to limit pods returned to those that
are running on the current node.
@return: a dict keyed by namespace, whose values are a dict of pods inside that namespace, keyed by pod name
"""
result = {}
try:
result = self.k8s_cache.pods_shallow_copy()
except Exception, e:
global_log.warn( "Failed to get k8s data: %s\n%s" % (str(e), traceback.format_exc() ),
limit_once_per_x_secs=300, limit_key='get_k8s_data' )
return result
def check_containers( self, run_state ):
"""Update thread for monitoring docker containers and the k8s info such as labels
"""
# Assert that the cache has been initialized
if not self.k8s_cache.is_initialized():
self._logger.log(scalyr_logging.DEBUG_LEVEL_0, 'container_checker - Kubernetes cache not initialized' )
raise K8sInitException( "check_container - Kubernetes cache not initialized. Aborting" )
# store the digests from the previous iteration of the main loop to see
# if any pod information has changed
prev_digests = {}
base_attributes = self.__get_base_attributes()
previous_time = time.time()
while run_state.is_running():
try:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Attempting to retrieve list of containers:' )
current_time = time.time()
running_containers = _get_containers(
self.__client, ignore_container=self.container_id, running_or_created_after=previous_time,
glob_list=self.__glob_list, include_log_path=True, k8s_cache=self.k8s_cache,
k8s_include_by_default=self.__include_all, current_time=current_time,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
previous_time = current_time - 1
# if running_containers is None, that means querying the docker api failed.
# rather than resetting the list of running containers to empty
# continue using the previous list of containers
if running_containers is None:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Failed to get list of containers')
running_containers = self.containers
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Found %d containers' % len(running_containers))
#get the containers that have started since the last sample
starting = {}
changed = {}
digests = {}
for cid, info in running_containers.iteritems():
pod = None
if 'k8s_info' in info:
pod_name = info['k8s_info'].get( 'pod_name', 'invalid_pod' )
pod_namespace = info['k8s_info'].get( 'pod_namespace', 'invalid_namespace' )
pod = info['k8s_info'].get( 'pod_info', None )
if not pod:
self._logger.warning( "No pod info for container %s. pod: '%s/%s'" % (_get_short_cid( cid ), pod_namespace, pod_name),
limit_once_per_x_secs=300,
limit_key='check-container-pod-info-%s' % cid)
# start logging the container if it is one that wasn't previously running
if cid not in self.containers:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Starting loggers for container '%s'" % info['name'] )
starting[cid] = info
elif cid in prev_digests:
# container was running and it exists in the previous digest dict, so see if
# it has changed
if pod and prev_digests[cid] != pod.digest:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Pod digest changed for '%s'" % info['name'] )
changed[cid] = info
# store the digest from this iteration of the loop
if pod:
digests[cid] = pod.digest
#get the containers that have stopped
stopping = {}
for cid, info in self.containers.iteritems():
if cid not in running_containers:
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "Stopping logger for container '%s' (%s)" % (info['name'], cid[:6] ) )
stopping[cid] = info
#stop the old loggers
self.__stop_loggers( stopping )
#update the list of running containers
#do this before starting new ones, as starting up new ones
#will access self.containers
self.containers = running_containers
#start the new ones
self.__start_loggers( starting, self.k8s_cache )
prev_digests = digests
# update the log config for any changed containers
if self.__log_watcher:
for logger in self.raw_logs:
if logger['cid'] in changed:
info = changed[logger['cid']]
new_config = self.__get_log_config_for_container( logger['cid'], info, self.k8s_cache, base_attributes )
self._logger.log(scalyr_logging.DEBUG_LEVEL_1, "updating config for '%s'" % info['name'] )
self.__log_watcher.update_log_config( self.__module.module_name, new_config )
except Exception, e:
self._logger.warn( "Exception occurred when checking containers %s\n%s" % (str( e ), traceback.format_exc()) )
run_state.sleep_but_awaken_if_stopped( self.__delay )
def set_log_watcher( self, log_watcher, module ):
self.__log_watcher = log_watcher
self.__module = module
def __get_scalyr_container_id( self, client, name ):
"""Gets the container id of the scalyr-agent container
If the config option container_name is empty, then it is assumed that the scalyr agent is running
on the host and not in a container and None is returned.
"""
result = None
regex = None
if name is not None:
regex = re.compile( name )
# get all the containers
containers = client.containers()
for container in containers:
# see if we are checking on names
if name is not None:
# if so, loop over all container names for this container
# Note: containers should only have one name, but the 'Names' field
# is a list, so iterate over it just in case
for cname in container['Names']:
cname = cname.lstrip( '/' )
# check if the name regex matches
m = regex.match( cname )
if m:
result = container['Id']
break
# not checking container name, so check the Command instead to see if it's the agent
else:
if container['Command'].startswith( '/usr/sbin/scalyr-agent-2' ):
result = container['Id']
if result:
break
if not result:
# only raise an exception if we were looking for a specific name but couldn't find it
if name is not None:
raise Exception( "Unable to find a matching container id for container '%s'. Please make sure that a "
"container matching the regular expression '%s' is running." % (name, name) )
return result
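# For example (illustrative): with the monitor config option
#   "container_name": "^scalyr-agent.*"
# the method above returns the id of the first container whose name matches that regular
# expression; when 'container_name' is unset it instead looks for a container whose command
# starts with '/usr/sbin/scalyr-agent-2'.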
def __stop_loggers( self, stopping ):
"""
Stops any DockerLoggers in the 'stopping' dict
@param: stopping - a dict of container ids => container names. Any running containers that have
the same container-id as a key in the dict will be stopped.
"""
if stopping:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Stopping all docker loggers')
# go through all the raw logs and see if any of them exist in the stopping list, and if so, stop them
for logger in self.raw_logs:
cid = logger['cid']
if cid in stopping:
path = logger['log_config']['path']
if self.__log_watcher:
self.__log_watcher.schedule_log_path_for_removal( self.__module.module_name, path )
self.raw_logs[:] = [l for l in self.raw_logs if l['cid'] not in stopping]
self.docker_logs[:] = [l for l in self.docker_logs if l['cid'] not in stopping]
def __start_loggers( self, starting, k8s_cache ):
"""
Starts DockerLoggers for newly started containers
@param: starting - a dict of containers (cid => info) whose logs should start being collected
"""
if starting:
self._logger.log(scalyr_logging.DEBUG_LEVEL_2, 'Starting all docker loggers')
docker_logs = self.__get_docker_logs( starting, k8s_cache )
self.__start_docker_logs( docker_logs )
self.docker_logs.extend( docker_logs )
def __start_docker_logs( self, docker_logs ):
for log in docker_logs:
if self.__log_watcher:
log['log_config'] = self.__log_watcher.add_log_config( self.__module, log['log_config'] )
self.raw_logs.append( log )
def __get_last_request_for_log( self, path ):
result = datetime.datetime.fromtimestamp( self.__start_time )
try:
full_path = os.path.join( self.__log_path, path )
fp = open( full_path, 'r', self.__readback_buffer_size )
# seek readback buffer bytes from the end of the file
fp.seek( 0, os.SEEK_END )
size = fp.tell()
if size < self.__readback_buffer_size:
fp.seek( 0, os.SEEK_SET )
else:
fp.seek( size - self.__readback_buffer_size, os.SEEK_SET )
first = True
for line in fp:
# ignore the first line because the seek above likely landed somewhere
# in the middle of it
if first:
first = False
continue
dt, _ = _split_datetime_from_line( line )
if dt:
result = dt
fp.close()
except Exception, e:
global_log.info( "%s", str(e) )
return scalyr_util.seconds_since_epoch( result )
def __create_log_config( self, parser, path, attributes, parse_as_json=False ):
"""Convenience function to create a log_config dict from the parameters"""
return { 'parser': parser,
'path': path,
'parse_lines_as_json' : parse_as_json,
'attributes': attributes
}
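# For example (illustrative), a typical result for a container's json log file looks like:
#   { 'parser': 'docker',
#     'path': '/var/lib/docker/containers/<cid>/<cid>-json.log',
#     'parse_lines_as_json': True,
#     'attributes': JsonObject( { 'monitor': 'agentKubernetes', 'pod_name': ..., ... } ) }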
def __get_base_attributes( self ):
attributes = None
try:
attributes = JsonObject( { "monitor": "agentKubernetes" } )
if self.__host_hostname:
attributes['serverHost'] = self.__host_hostname
except Exception, e:
self._logger.error( "Error setting monitor attribute in KubernetesMonitor" )
raise
return attributes
def __get_log_config_for_container( self, cid, info, k8s_cache, base_attributes ):
result = None
container_attributes = base_attributes.copy()
if not self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['containerName'] = info['name']
container_attributes['containerId'] = cid
elif self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['container_id'] = cid
parser = 'docker'
common_annotations = {}
container_annotations = {}
# pod name and namespace are set to an invalid value for cases where errors occur and a log
# message is produced, so that the log message has clearly invalid values for these rather
# than just being empty
pod_name = '--'
pod_namespace = '--'
short_cid = _get_short_cid( cid )
# dict of available substitutions for the rename_logfile field
rename_vars = {
'short_id' : short_cid,
'container_id' : cid,
'container_name' : info['name'],
}
k8s_info = info.get( 'k8s_info', {} )
if k8s_info:
pod_name = k8s_info.get('pod_name', 'invalid_pod')
pod_namespace = k8s_info.get('pod_namespace', 'invalid_namespace')
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "got k8s info for container %s, '%s/%s'" % (short_cid, pod_namespace, pod_name) )
pod = k8s_cache.pod( pod_namespace, pod_name )
if pod:
rename_vars['pod_name'] = pod.name
rename_vars['namespace'] = pod.namespace
rename_vars['node_name'] = pod.node_name
container_attributes['pod_name'] = pod.name
container_attributes['pod_namespace'] = pod.namespace
container_attributes['pod_uid'] = pod.uid
if not self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['node_name'] = pod.node_name
elif self.__use_v2_attributes or self.__use_v1_and_v2_attributes:
container_attributes['k8s_node'] = pod.node_name
container_attributes['scalyr-category'] = 'log'
for label, value in pod.labels.iteritems():
container_attributes[label] = value
if 'parser' in pod.labels:
parser = pod.labels['parser']
# get the controller information if any
if pod.controller is not None:
controller = pod.controller
# for backwards compatibility allow both deployment_name and controller_name here
rename_vars['deployment_name'] = controller.name
rename_vars['controller_name'] = controller.name
if self.__include_controller_info:
container_attributes['_k8s_dn'] = controller.name
container_attributes['_k8s_dl'] = controller.flat_labels
container_attributes['_k8s_ck'] = controller.kind
# get the cluster name
cluster_name = k8s_cache.get_cluster_name()
if self.__include_controller_info and cluster_name is not None:
container_attributes['_k8s_cn'] = cluster_name
# get the annotations of this pod as a dict.
# by default all annotations will be applied to all containers
# in the pod
all_annotations = pod.annotations
container_specific_annotations = False
# get any common annotations for all containers
for annotation, value in all_annotations.iteritems():
if annotation in pod.container_names:
container_specific_annotations = True
else:
common_annotations[annotation] = value
# now get any container specific annotations
# for this container
if container_specific_annotations:
k8s_container_name = k8s_info.get('k8s_container_name', '')
if k8s_container_name in all_annotations:
# get the annotations for this container
container_annotations = all_annotations[k8s_container_name]
# sanity check to make sure annotations are either a JsonObject or dict
if not isinstance( container_annotations, JsonObject ) and not isinstance( container_annotations, dict ):
self._logger.warning( "Unexpected configuration found in annotations for pod '%s/%s'. Expected a dict for configuration of container '%s', but got a '%s' instead. No container specific configuration options applied." % ( pod.namespace, pod.name, k8s_container_name, str( type(container_annotations) ) ),
limit_once_per_x_secs=300,
limit_key='k8s-invalid-container-config-%s' % cid)
container_annotations = {}
else:
self._logger.warning( "Couldn't map container '%s' to pod '%s/%s'. Logging limited metadata from docker container labels instead." % ( short_cid, pod_namespace, pod_name ),
limit_once_per_x_secs=300,
limit_key='k8s-docker-mapping-%s' % cid)
container_attributes['pod_name'] = pod_name
container_attributes['pod_namespace'] = pod_namespace
container_attributes['pod_uid'] = k8s_info.get('pod_uid', 'invalid_uid')
container_attributes['k8s_container_name'] = k8s_info.get('k8s_container_name', 'invalid_container_name')
else:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "no k8s info for container %s" % short_cid )
if 'log_path' in info and info['log_path']:
result = self.__create_log_config( parser=parser, path=info['log_path'], attributes=container_attributes, parse_as_json=self.__parse_json )
result['rename_logfile'] = '/docker/%s.log' % info['name']
# This is a hack to prevent the original log file name from being added to the attributes.
if self.__use_v2_attributes and not self.__use_v1_and_v2_attributes:
result['rename_no_original'] = True
# apply common annotations first
annotations = common_annotations
# set/override any container specific annotations
annotations.update( container_annotations )
# ignore include/exclude options which have special
# handling in the log_config verification, which expects a different type than the one used in the annotations
skip_keys = [ 'include', 'exclude' ]
# list of config items that cannot be updated via annotations
invalid_keys = [ 'path', 'lineGroupers' ]
# set config items, ignoring invalid options and taking care to
# handle attributes
for key, value in annotations.iteritems():
if key in skip_keys:
continue
if key in invalid_keys:
self._logger.warning( "Invalid key '%s' found in annotation config for '%s/%s'. Configuration of '%s' is not currently supported via annotations and has been ignored." % (key, pod_namespace, pod_name, key ),
limit_once_per_x_secs=300,
limit_key='k8s-invalid-annotation-config-key-%s' % key)
continue
# we need to make sure we update attributes rather
# than overriding the entire dict, otherwise we'll override pod_name, namespace etc
if key == 'attributes':
if 'attributes' not in result:
result['attributes'] = {}
attrs = result['attributes']
attrs.update( value )
# we also need to override the top level parser value if attributes['parser'] is set
if 'parser' in attrs:
result['parser'] = attrs['parser']
continue
elif key == 'rename_logfile':
# rename_logfile supports string substitutions
# so update value if necessary
template = Template( value )
value = template.safe_substitute( rename_vars )
# everything else is added to the log_config result as is
result[key] = value
return result
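# For example (illustrative): a pod annotated with
#   log.config.scalyr.com/rename_logfile: /k8s/${pod_name}.log
# would, via the Template substitution above, have its log uploaded under a name such as
# '/k8s/nginx-2370548977-tjnxf.log' (the pod name shown here is made up).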
def __get_docker_logs( self, containers, k8s_cache ):
"""Returns a list of dicts containing the container id, stream, and a log_config
for each container in the 'containers' param.
"""
result = []
attributes = self.__get_base_attributes()
prefix = self.__log_prefix + '-'
for cid, info in containers.iteritems():
log_config = self.__get_log_config_for_container( cid, info, k8s_cache, attributes )
if log_config:
result.append( { 'cid': cid, 'stream': 'raw', 'log_config': log_config } )
return result
class KubernetesMonitor( ScalyrMonitor ):
"""
# Kubernetes Monitor
This monitor is based on the docker_monitor plugin, and uses the raw logs mode of the docker
plugin to send Kubernetes logs to Scalyr. It also reads labels from the Kubernetes API and
associates them with the appropriate logs.
## Log Config via Annotations
The logs collected by the Kubernetes monitor can be configured via k8s pod annotations.
The monitor examines all annotations on all pods, and for any annotation that begins with the
prefix log.config.scalyr.com/, it extracts the
entries (minus the prefix) and maps them to the log_config stanza for that pod's containers.
The mapping is described below.
The following fields of a log's configuration can be set via pod annotations:
* parser
* attributes
* sampling_rules
* rename_logfile
* redaction_rules
These behave in the same way as specified in the main [Scalyr help
docs](https://www.scalyr.com/help/scalyr-agent#logUpload). The following configuration
fields behave differently when configured via k8s annotations:
* exclude (see below)
* lineGroupers (not supported at all)
* path (the path is always fixed for k8s container logs)
### Excluding Logs
Containers and pods can be specifically included/excluded from having their logs collected and
sent to Scalyr. Unlike the normal log_config `exclude` option which takes an array of log path
exclusion globs, annotations simply support a Boolean true/false for a given container/pod.
Both `include` and `exclude` are supported, with `include` always overriding `exclude` if both
are set. e.g.
log.config.scalyr.com/exclude: true
has the same effect as
log.config.scalyr.com/include: false
By default the agent monitors the logs of all pods/containers, and you have to manually exclude
pods/containers you don't want. You can also set `k8s_include_all_containers: false` in the
kubernetes_monitor monitor config section of `agent.d/docker.json`, in which case all containers are
excluded by default and have to be manually included.
### Specifying Config Options
The Kubernetes monitor takes the string value of each annotation and maps it to a dict, or
array value according to the following format:
Values separated by a period are mapped to dict keys e.g. if one annotation on a given pod was
specified as:
log.config.scalyr.com/attributes.parser: accessLog
Then this would be mapped to the following dict, which would then be applied to the log config
for all containers in that pod:
{ "attributes": { "parser": "accessLog" } }
Arrays can be specified by using one or more digits as the key, e.g. if the annotation was
log.config.scalyr.com/sampling_rules.0.match_expression: INFO
log.config.scalyr.com/sampling_rules.0.sampling_rate: 0.1
log.config.scalyr.com/sampling_rules.1.match_expression: FINE
log.config.scalyr.com/sampling_rules.1.sampling_rate: 0
This will be mapped to the following structure:
{ "sampling_rules":
[
{ "match_expression": "INFO", "sampling_rate": 0.1 },
{ "match_expression": "FINE", "sampling_rate": 0 }
]
}
Array keys are sorted in numeric order before processing, and unique objects need to have
different digits as the array key. If a sub-key has the same array key as a previously seen
sub-key, then the previous value of that sub-key is overwritten.
There is no guarantee about the order of processing for items with the same numeric array key,
so if the config was specified as:
log.config.scalyr.com/sampling_rules.0.match_expression: INFO
log.config.scalyr.com/sampling_rules.0.match_expression: FINE
It is not defined or guaranteed what the actual value will be (INFO or FINE).
### Applying config options to specific containers in a pod
If a pod has multiple containers and you only want to apply log configuration options to a
specific container you can do so by prefixing the option with the container name, e.g. if you
had a pod with two containers `nginx` and `helper1` and you wanted to exclude `helper1` logs you
could specify the following annotation:
log.config.scalyr.com/helper1.exclude: true
Config items specified without a container name are applied to all containers in the pod, but
container specific settings will override pod-level options, e.g. in this example:
log.config.scalyr.com/exclude: true
log.config.scalyr.com/nginx.include: true
All containers in the pod would be excluded *except* for the nginx container, which is included.
This technique is applicable for all log config options, not just include/exclude. For
example you could set the line sampling rules for all containers in a pod, but use a different set
of line sampling rules for one specific container in the pod if needed.
### Dynamic Updates
Currently all annotation config options except `exclude: true`/`include: false` can be
dynamically updated using the `kubectl annotate` command.
For `exclude: true`/`include: false` once a pod/container has started being logged, then while the
container is still running, there is currently no way to dynamically start/stop logging of that
container using annotations without updating the config yaml, and applying the updated config to the
cluster.
"""
def __get_socket_file( self ):
"""Gets the Docker API socket file and validates that it is a UNIX socket
"""
#make sure the API socket exists and is a valid socket
api_socket = self._config.get( 'api_socket' )
try:
st = os.stat( api_socket )
if not stat.S_ISSOCK( st.st_mode ):
raise Exception()
except:
raise Exception( "The file '%s' specified by the 'api_socket' configuration option does not exist or is not a socket.\n\tPlease make sure you have mapped the docker socket from the host to this container using the -v parameter.\n\tNote: Due to problems Docker has mapping symbolic links, you should specify the final file and not a path that contains a symbolic link, e.g. map /run/docker.sock rather than /var/run/docker.sock as on many unices /var/run is a symbolic link to the /run directory." % api_socket )
return api_socket
def _initialize( self ):
data_path = ""
log_path = ""
host_hostname = ""
# Since getting metrics from Docker takes a non-trivial amount of time, we will deduct the time spent
# in gathering the metric samples from the time we should sleep so that we do gather a sample once every
# sample_interval_secs
self._adjust_sleep_by_gather_time = True
# Override the default value for the rate limit for writing the metric logs. We override it to set no limit
# because it is fairly difficult to bound this since the log will emit X metrics for every pod being monitored.
self._log_write_rate = self._config.get('monitor_log_write_rate', convert_to=int, default=-1)
self._log_max_write_burst = self._config.get('monitor_log_max_write_burst', convert_to=int, default=-1)
if self._global_config:
data_path = self._global_config.agent_data_path
log_path = self._global_config.agent_log_path
if self._global_config.server_attributes:
if 'serverHost' in self._global_config.server_attributes:
host_hostname = self._global_config.server_attributes['serverHost']
else:
self._logger.info( "no server host in server attributes" )
else:
self._logger.info( "no server attributes in global config" )
# The namespace whose logs we should not collect.
self.__namespaces_to_ignore = []
for x in self._config.get('k8s_ignore_namespaces').split():
self.__namespaces_to_ignore.append(x.strip())
self.__ignore_pod_sandboxes = self._config.get('k8s_ignore_pod_sandboxes')
self.__socket_file = self.__get_socket_file()
self.__docker_api_version = self._config.get( 'docker_api_version' )
self.__k8s_api_url = self._config.get('k8s_api_url')
self.__client = DockerClient( base_url=('unix:/%s'%self.__socket_file), version=self.__docker_api_version )
self.__metric_fetcher = DockerMetricFetcher(self.__client, self._config.get('docker_max_parallel_stats'),
self._logger)
self.__glob_list = self._config.get( 'container_globs' )
self.__include_all = self._config.get( 'k8s_include_all_containers' )
self.__report_container_metrics = self._config.get('report_container_metrics')
self.__report_k8s_metrics = self._config.get('report_k8s_metrics') and self.__report_container_metrics
# Object for talking to the kubelet server running on this localhost. This is used to gather metrics only
# available via the kubelet.
self.__kubelet_api = None
self.__gather_k8s_pod_info = self._config.get('gather_k8s_pod_info')
# Including controller information for every event uploaded about a pod (cluster name, controller name,
# controller labels)
self.__include_controller_info = self._config.get('include_deployment_info', convert_to=bool, default=False)
self.__container_checker = None
if self._config.get('log_mode') != 'syslog':
self.__container_checker = ContainerChecker( self._config, self._logger, self.__socket_file,
self.__docker_api_version, host_hostname, data_path, log_path,
self.__include_all, self.__include_controller_info,
self.__namespaces_to_ignore, self.__ignore_pod_sandboxes )
# Metrics provided by the kubelet API.
self.__k8s_pod_network_metrics = {
'k8s.pod.network.rx_bytes': 'rxBytes',
'k8s.pod.network.rx_errors': 'rxErrors',
'k8s.pod.network.tx_bytes': 'txBytes',
'k8s.pod.network.tx_errors': 'txErrors',
}
# Metrics provided by the kubelet API.
self.__k8s_node_network_metrics = {
'k8s.node.network.rx_bytes': 'rxBytes',
'k8s.node.network.rx_errors': 'rxErrors',
'k8s.node.network.tx_bytes': 'txBytes',
'k8s.node.network.tx_errors': 'txErrors',
}
# All the docker. metrics are provided by the docker API.
self.__network_metrics = self.__build_metric_dict( 'docker.net.', [
"rx_bytes",
"rx_dropped",
"rx_errors",
"rx_packets",
"tx_bytes",
"tx_dropped",
"tx_errors",
"tx_packets",
])
self.__mem_stat_metrics = self.__build_metric_dict( 'docker.mem.stat.', [
"total_pgmajfault",
"cache",
"mapped_file",
"total_inactive_file",
"pgpgout",
"rss",
"total_mapped_file",
"writeback",
"unevictable",
"pgpgin",
"total_unevictable",
"pgmajfault",
"total_rss",
"total_rss_huge",
"total_writeback",
"total_inactive_anon",
"rss_huge",
"hierarchical_memory_limit",
"total_pgfault",
"total_active_file",
"active_anon",
"total_active_anon",
"total_pgpgout",
"total_cache",
"inactive_anon",
"active_file",
"pgfault",
"inactive_file",
"total_pgpgin"
])
self.__mem_metrics = self.__build_metric_dict( 'docker.mem.', [
"max_usage",
"usage",
"fail_cnt",
"limit"
])
self.__cpu_usage_metrics = self.__build_metric_dict( 'docker.cpu.', [
"usage_in_usermode",
"total_usage",
"usage_in_kernelmode"
])
self.__cpu_throttling_metrics = self.__build_metric_dict( 'docker.cpu.throttling.', [
"periods",
"throttled_periods",
"throttled_time"
])
def set_log_watcher( self, log_watcher ):
"""Provides a log_watcher object that monitors can use to add/remove log files
"""
if self.__container_checker:
self.__container_checker.set_log_watcher( log_watcher, self )
def __build_metric_dict( self, prefix, names ):
result = {}
for name in names:
result["%s%s"%(prefix, name)] = name
return result
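# For example (illustrative):
#   self.__build_metric_dict( 'docker.net.', ['rx_bytes', 'tx_bytes'] )
# returns { 'docker.net.rx_bytes': 'rx_bytes', 'docker.net.tx_bytes': 'tx_bytes' }, i.e. a map
# from the full Scalyr metric name to the corresponding key in the Docker API response.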
def __log_metrics( self, monitor_override, metrics_to_emit, metrics, extra=None ):
if metrics is None:
return
for key, value in metrics_to_emit.iteritems():
if value in metrics:
# Note, we do a bit of a hack to pretend the monitor's name includes the container/pod's name. We take this
# approach because the Scalyr servers already have some special logic to collect monitor names and ids
# to help auto generate dashboards. So, we want a monitor name like `docker_monitor(foo_container)`
# for each running container.
self._logger.emit_value( key, metrics[value], extra, monitor_id_override=monitor_override )
def __log_network_interface_metrics( self, container, metrics, interface=None, k8s_extra={} ):
""" Logs network interface metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: interface - an optional interface value to associate with each metric value emitted
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
extra = None
if interface:
if k8s_extra is None:
extra = {}
else:
extra = k8s_extra.copy()
extra['interface'] = interface
self.__log_metrics( container, self.__network_metrics, metrics, extra )
def __log_memory_stats_metrics( self, container, metrics, k8s_extra ):
""" Logs memory stats metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
if 'stats' in metrics:
self.__log_metrics( container, self.__mem_stat_metrics, metrics['stats'], k8s_extra )
self.__log_metrics( container, self.__mem_metrics, metrics, k8s_extra )
def __log_cpu_stats_metrics( self, container, metrics, k8s_extra ):
""" Logs cpu stats metrics
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
if 'cpu_usage' in metrics:
cpu_usage = metrics['cpu_usage']
if 'percpu_usage' in cpu_usage:
percpu = cpu_usage['percpu_usage']
count = 1
if percpu:
for usage in percpu:
# Use dev for the CPU number since it is a known tag for Scalyr to use in delta computation.
extra = { 'dev' : count }
if k8s_extra is not None:
extra.update(k8s_extra)
self._logger.emit_value( 'docker.cpu.usage', usage, extra, monitor_id_override=container )
count += 1
self.__log_metrics( container, self.__cpu_usage_metrics, cpu_usage, k8s_extra )
if 'system_cpu_usage' in metrics:
self._logger.emit_value( 'docker.cpu.system_cpu_usage', metrics['system_cpu_usage'], k8s_extra,
monitor_id_override=container )
if 'throttling_data' in metrics:
self.__log_metrics( container, self.__cpu_throttling_metrics, metrics['throttling_data'], k8s_extra )
def __log_json_metrics( self, container, metrics, k8s_extra ):
""" Log docker metrics based on the JSON response returned from querying the Docker API
@param: container - name of the container the log originated from
@param: metrics - a dict of metrics keys/values to emit
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
for key, value in metrics.iteritems():
if value is None:
continue
if key == 'networks':
for interface, network_metrics in value.iteritems():
self.__log_network_interface_metrics( container, network_metrics, interface, k8s_extra=k8s_extra )
elif key == 'network':
self.__log_network_interface_metrics( container, value, k8s_extra=k8s_extra )
elif key == 'memory_stats':
self.__log_memory_stats_metrics( container, value, k8s_extra )
elif key == 'cpu_stats':
self.__log_cpu_stats_metrics( container, value, k8s_extra )
def __gather_metrics_from_api_for_container( self, container, k8s_extra ):
""" Query the Docker API for container metrics
@param: container - name of the container to query
@param: k8s_extra - extra k8s specific key/value pairs to associate with each metric value emitted
"""
result = self.__metric_fetcher.get_metrics(container)
if result is not None:
self.__log_json_metrics( container, result, k8s_extra )
def __build_k8s_controller_info( self, pod ):
"""
Builds a dict containing information about the controller settings for a given pod
@param: pod - a PodInfo object containing basic information (namespace/name) about the pod to query
@return: a dict containing the controller name for the controller running
the specified pod, or an empty dict if the pod is not part of a controller
"""
k8s_extra = {}
if pod is not None:
# default key and controller name
key = 'k8s-controller'
name = 'none'
# check if we have a controller, and if so use it
controller = pod.controller
if controller is not None:
# use one of the predefined key if this is a controller kind we know about
if controller.kind in _CONTROLLER_KEYS:
key = _CONTROLLER_KEYS[controller.kind]
name = controller.name
k8s_extra = {
key: name
}
return k8s_extra
def __get_k8s_controller_info( self, container ):
"""
Gets information about the kubernetes controller of a given container
@param: container - a dict containing information about a container, returned by _get_containers
"""
k8s_info = container.get( 'k8s_info', {} )
pod = k8s_info.get( 'pod_info', None )
if pod is None:
return None
return self.__build_k8s_controller_info( pod )
def __get_cluster_info( self, cluster_name ):
""" returns a dict of values about the cluster """
cluster_info = {}
if self.__include_controller_info and cluster_name is not None:
cluster_info['k8s-cluster'] = cluster_name
return cluster_info
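# Metrics are gathered in two passes: first prefetch_metrics() is called for every container so the
# DockerMetricFetcher can issue the stats requests in parallel, then get_metrics() is invoked per
# container (via __gather_metrics_from_api_for_container) to emit the values.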
def __gather_metrics_from_api( self, containers, cluster_name ):
cluster_info = self.__get_cluster_info( cluster_name )
for cid, info in containers.iteritems():
self.__metric_fetcher.prefetch_metrics(info['name'])
for cid, info in containers.iteritems():
k8s_extra = {}
if self.__include_controller_info:
k8s_extra = self.__get_k8s_controller_info( info )
if k8s_extra is not None:
k8s_extra.update( cluster_info )
k8s_extra.update({'pod_uid': info['name']})
self.__gather_metrics_from_api_for_container( info['name'], k8s_extra )
def __gather_k8s_metrics_for_node( self, node, extra ):
"""
Gathers metrics from a Kubelet API response for the node itself
@param: node - A JSON Object for the node from a response to a Kubelet API query
@param: extra - Extra fields to append to each metric
"""
name = node.get( "nodeName", None )
if name is None:
return
node_extra = {
'node_name': name
}
node_extra.update(extra)
for key, metrics in node.iteritems():
if key == 'network':
self.__log_metrics( name, self.__k8s_node_network_metrics, metrics, node_extra )
def __gather_k8s_metrics_for_pod( self, pod_metrics, pod_info, k8s_extra ):
"""
Gathers metrics from a Kubelet API response for a specific pod
@param: pod_metrics - A JSON Object from a response to a Kubelet API query
@param: pod_info - A PodInfo structure regarding the pod in question
@param: k8s_extra - Extra k8s specific fields to append to each metric
"""
extra = {
'pod_uid': pod_info.uid
}
extra.update( k8s_extra )
for key, metrics in pod_metrics.iteritems():
if key == 'network':
self.__log_metrics( pod_info.uid, self.__k8s_pod_network_metrics, metrics, extra )
def __gather_k8s_metrics_from_kubelet( self, containers, kubelet_api, cluster_name ):
"""
Gathers k8s metrics from a response to a stats query of the Kubelet API
@param: containers - a dict returned by _get_containers with info for all containers we are interested in
@param: kubelet_api - a KubeletApi object for querying the KubeletApi
@param: cluster_name - the name of the k8s cluster
"""
cluster_info = self.__get_cluster_info( cluster_name )
# get set of pods we are interested in querying
pod_info = {}
for cid, info in containers.iteritems():
k8s_info = info.get( 'k8s_info', {} )
pod = k8s_info.get( 'pod_info', None )
if pod is None:
continue
pod_info[pod.uid] = pod
try:
stats = kubelet_api.query_stats()
node = stats.get( 'node', {} )
if node:
self.__gather_k8s_metrics_for_node( node, cluster_info )
pods = stats.get( 'pods', [] )
# process pod stats, skipping any that are not in our list
# of pod_info
for pod in pods:
pod_ref = pod.get( 'podRef', {} )
pod_uid = pod_ref.get( 'uid', '<invalid>' )
if pod_uid not in pod_info:
continue
info = pod_info[pod_uid]
controller_info = {}
if self.__include_controller_info:
controller_info = self.__build_k8s_controller_info( info )
controller_info.update( cluster_info )
self.__gather_k8s_metrics_for_pod( pod, info, controller_info )
except KubeletApiException, e:
self._logger.warning( "Error querying kubelet API: %s" % str( e ),
limit_once_per_x_secs=300,
limit_key='kubelet-api-query' )
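# gather_sample runs once per sample interval: it refreshes container/pod information, emits
# per-container Docker API metrics, optionally kubelet (k8s) metrics, and, if enabled, pod and
# container inventory values.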
def gather_sample( self ):
k8s_cache = None
if self.__container_checker:
k8s_cache = self.__container_checker.k8s_cache
cluster_name = None
if k8s_cache is not None:
cluster_name = k8s_cache.get_cluster_name()
# gather metrics
containers = None
if self.__report_container_metrics:
containers = _get_containers(self.__client, ignore_container=None, glob_list=self.__glob_list,
k8s_cache=k8s_cache, k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
try:
if containers:
if self.__report_container_metrics:
self._logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Attempting to retrieve metrics for %d containers' % len(containers))
self.__gather_metrics_from_api( containers, cluster_name )
if self.__report_k8s_metrics:
self._logger.log(scalyr_logging.DEBUG_LEVEL_3, 'Attempting to retrieve k8s metrics %d' % len(containers))
self.__gather_k8s_metrics_from_kubelet( containers, self.__kubelet_api, cluster_name )
except Exception, e:
self._logger.exception( "Unexpected error logging metrics: %s" %( str(e) ) )
if self.__gather_k8s_pod_info:
cluster_info = self.__get_cluster_info( cluster_name )
containers = _get_containers( self.__client, only_running_containers=False, k8s_cache=k8s_cache,
k8s_include_by_default=self.__include_all,
k8s_namespaces_to_exclude=self.__namespaces_to_ignore)
for cid, info in containers.iteritems():
try:
extra = info.get( 'k8s_info', {} )
extra['status'] = info.get('status', 'unknown')
if self.__include_controller_info:
controller = self.__get_k8s_controller_info( info )
extra.update( controller )
extra.update( cluster_info )
namespace = extra.get( 'pod_namespace', 'invalid-namespace' )
self._logger.emit_value( 'docker.container_name', info['name'], extra, monitor_id_override="namespace:%s" % namespace )
except Exception, e:
self._logger.error( "Error logging container information for %s: %s" % (_get_short_cid( cid ), str( e )) )
if self.__container_checker:
namespaces = self.__container_checker.get_k8s_data()
for namespace, pods in namespaces.iteritems():
for pod_name, pod in pods.iteritems():
try:
extra = { 'pod_uid': pod.uid,
'pod_namespace': pod.namespace,
'node_name': pod.node_name }
if self.__include_controller_info:
controller_info = self.__build_k8s_controller_info( pod )
if controller_info:
extra.update( controller_info )
extra.update( cluster_info )
self._logger.emit_value( 'k8s.pod', pod.name, extra, monitor_id_override="namespace:%s" % pod.namespace )
except Exception, e:
self._logger.error( "Error logging pod information for %s: %s" % (pod.name, str( e )) )
def run( self ):
# workaround a multithread initialization problem with time.strptime
# see: http://code-trick.com/python-bug-attribute-error-_strptime/
# we can ignore the result
tm = time.strptime( "2016-08-29", "%Y-%m-%d" )
if self.__container_checker:
self.__container_checker.start()
try:
# check to see if the user has manually specified a cluster name, and if so then
# force enable 'Starbuck' features
if self.__container_checker and self.__container_checker.k8s_cache.get_cluster_name() is not None:
self._logger.log( scalyr_logging.DEBUG_LEVEL_1, "Cluster name detected, enabling k8s metric reporting and controller information" )
self.__include_controller_info = True
self.__report_k8s_metrics = self.__report_container_metrics
if self.__report_k8s_metrics:
k8s = KubernetesApi(k8s_api_url=self.__k8s_api_url)
self.__kubelet_api = KubeletApi( k8s )
except Exception, e:
self._logger.error( "Error creating KubeletApi object. Kubernetes metrics will not be logged: %s" % str( e ) )
self.__report_k8s_metrics = False
global_log.info('kubernetes_monitor parameters: ignoring namespaces: %s, report_controllers %s, '
'report_metrics %s' % (','.join(self.__namespaces_to_ignore),
str(self.__include_controller_info),
str(self.__report_container_metrics)))
ScalyrMonitor.run( self )
def stop(self, wait_on_join=True, join_timeout=5):
#stop the main server
ScalyrMonitor.stop( self, wait_on_join=wait_on_join, join_timeout=join_timeout )
if self.__container_checker is not None:
self.__container_checker.stop( wait_on_join, join_timeout )
if self.__metric_fetcher is not None:
self.__metric_fetcher.stop()
| 51.106667
| 523
| 0.634251
|
f6f8c36315ab14609c40584e7864382cf9687ef1
| 10,795
|
py
|
Python
|
bips/workflows/workflow6.py
|
FCP-INDI/BrainImagingPipelines
|
2b0da2b50814cc685f15fefbae8144624308ebfc
|
[
"Apache-2.0"
] | 1
|
2015-07-15T19:48:09.000Z
|
2015-07-15T19:48:09.000Z
|
bips/workflows/workflow6.py
|
FCP-INDI/BrainImagingPipelines
|
2b0da2b50814cc685f15fefbae8144624308ebfc
|
[
"Apache-2.0"
] | null | null | null |
bips/workflows/workflow6.py
|
FCP-INDI/BrainImagingPipelines
|
2b0da2b50814cc685f15fefbae8144624308ebfc
|
[
"Apache-2.0"
] | null | null | null |
import os
from .base import MetaWorkflow, load_config, register_workflow
from ..utils.reportsink.io import ReportSink
from traits.api import HasTraits, Directory, Bool, Button
import traits.api as traits
from .scripts.u0a14c5b5899911e1bca80023dfa375f2.workflow1 import get_dataflow
"""
Part 1: Define a MetaWorkflow
"""
desc = """
Compare Realignment Nodes workflow
=====================================
"""
mwf = MetaWorkflow()
mwf.uuid = '79755b1e8b1a11e1a2ae001e4fb1404c'
mwf.tags = ['motion_correction', 'test', 'nipy', 'fsl', 'spm']
mwf.help = desc
"""
Part 2: Define the config class & create_config function
"""
class config(HasTraits):
uuid = traits.Str(desc="UUID")
desc = traits.Str(desc='Workflow description')
# Directories
working_dir = Directory(mandatory=True, desc="Location of the Nipype working directory")
base_dir = Directory(os.path.abspath('.'),exists=True, desc='Base directory of data. (Should be subject-independent)')
sink_dir = Directory(mandatory=True, desc="Location where the BIP will store the results")
field_dir = Directory(exists=True, desc="Base directory of field-map data (Should be subject-independent) \
Set this value to None if you don't want fieldmap distortion correction")
crash_dir = Directory(mandatory=False, desc="Location to store crash files")
json_sink = Directory(mandatory=False, desc= "Location to store json_files")
surf_dir = Directory(mandatory=True, desc= "Freesurfer subjects directory")
# Execution
run_using_plugin = Bool(False, usedefault=True, desc="True to run pipeline with plugin, False to run serially")
plugin = traits.Enum("PBS", "PBSGraph","MultiProc", "SGE", "Condor",
usedefault=True,
desc="plugin to use, if run_using_plugin=True")
plugin_args = traits.Dict({"qsub_args": "-q many"},
usedefault=True, desc='Plugin arguments.')
test_mode = Bool(False, mandatory=False, usedefault=True,
desc='Affects where and whether the workflow keeps its \
intermediary files. True to keep intermediary files. ')
# Subjects
subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
desc="Subject id's. Note: These MUST match the subject id's in the \
Freesurfer directory. For simplicity, the subject id's should \
also match with the location of individual functional files.")
func_template = traits.String('%s/functional.nii.gz')
run_datagrabber_without_submitting = Bool(True, usedefault=True)
# Motion Correction
do_slicetiming = Bool(True, usedefault=True, desc="Perform slice timing correction")
SliceOrder = traits.List(traits.Int)
TR = traits.Float(mandatory=True, desc = "TR of functional")
# Buttons
check_func_datagrabber = Button("Check")
def _check_func_datagrabber_fired(self):
subs = self.subjects
for s in subs:
if not os.path.exists(os.path.join(self.base_dir,self.func_template % s)):
print "ERROR", os.path.join(self.base_dir,self.func_template % s), "does NOT exist!"
break
else:
print os.path.join(self.base_dir,self.func_template % s), "exists!"
def create_config():
c = config()
c.uuid = mwf.uuid
c.desc = mwf.help
return c
mwf.config_ui = create_config
"""
Part 3: Create a View
"""
def create_view():
from traitsui.api import View, Item, Group, CSVListEditor
from traitsui.menu import OKButton, CancelButton
view = View(Group(Item(name='uuid', style='readonly'),
Item(name='desc', style='readonly'),
label='Description', show_border=True),
Group(Item(name='working_dir'),
Item(name='sink_dir'),
Item(name='crash_dir'),
Item(name='json_sink'),
label='Directories', show_border=True),
Group(Item(name='run_using_plugin'),
Item(name='plugin', enabled_when="run_using_plugin"),
Item(name='plugin_args', enabled_when="run_using_plugin"),
Item(name='test_mode'),
label='Execution Options', show_border=True),
Group(Item(name='subjects', editor=CSVListEditor()),
Item(name='base_dir', ),
Item(name='func_template'),
Item(name='check_func_datagrabber'),
Item(name='run_datagrabber_without_submitting'),
label='Subjects', show_border=True),
Group(Item(name='TR'),
Item(name='SliceOrder', editor=CSVListEditor()),
label='Motion Correction', show_border=True),
buttons = [OKButton, CancelButton],
resizable=True,
width=1050)
return view
mwf.config_view = create_view
"""
Part 4: Workflow Construction
"""
def plot_trans(nipy1,nipy2,fsl,spm):
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import os
fname=os.path.abspath('translations.png')
plt.subplot(411);plt.plot(np.genfromtxt(nipy1)[:,3:])
plt.ylabel('nipy')
plt.subplot(412);plt.plot(np.genfromtxt(nipy2)[:,3:])
plt.ylabel('nipy_no_t')
plt.subplot(413);plt.plot(np.genfromtxt(fsl)[:,3:])
plt.ylabel('fsl')
plt.subplot(414);plt.plot(np.genfromtxt(spm)[:,:3])
plt.ylabel('spm')
plt.savefig(fname)
plt.close()
return fname
def plot_rot(nipy1,nipy2,fsl,spm):
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import numpy as np
import os
fname=os.path.abspath('rotations.png')
plt.subplot(411);plt.plot(np.genfromtxt(nipy1)[:,:3])
plt.ylabel('nipy')
plt.subplot(412);plt.plot(np.genfromtxt(nipy2)[:,:3])
plt.ylabel('nipy_no_t')
plt.subplot(413);plt.plot(np.genfromtxt(fsl)[:,:3])
plt.ylabel('fsl')
plt.subplot(414);plt.plot(np.genfromtxt(spm)[:,3:])
plt.ylabel('spm')
plt.savefig(fname)
plt.close()
return fname
def corr_mat(nipy1,nipy2,fsl,spm):
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
fname=os.path.abspath('correlation.png')
allparams = np.hstack((np.genfromtxt(nipy1),
np.genfromtxt(nipy2),
np.genfromtxt(spm)[:,[3,4,5,0,1,2]]))
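# Note: only the nipy, nipy_no_t and spm parameters are stacked here; the fsl (MCFLIRT)
# parameters are not included in the correlation matrix.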
plt.imshow(abs(np.corrcoef(allparams.T)), interpolation='nearest'); plt.colorbar()
plt.savefig(fname)
plt.close()
return fname
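# compare_workflow realigns each subject's functional run four ways (nipy with time interpolation,
# nipy without it, FSL MCFLIRT and SPM Realign) and writes a report containing translation/rotation
# plots plus a correlation matrix of the motion parameters.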
def compare_workflow(c, name='compare_realignments'):
#import nipype.interfaces.matlab as mlab
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces.nipy import FmriRealign4d
import nipype.interfaces.fsl as fsl
import nipype.interfaces.spm as spm
#mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
#mlab.MatlabCommand.set_default_paths('/software/spm8_4290')
workflow =pe.Workflow(name=name)
infosource = pe.Node(util.IdentityInterface(fields=['subject_id']),
name='subject_names')
if c.test_mode:
infosource.iterables = ('subject_id', [c.subjects[0]])
else:
infosource.iterables = ('subject_id', c.subjects)
datagrabber = get_dataflow(c)
workflow.connect(infosource, 'subject_id', datagrabber, 'subject_id')
realign_nipy = pe.Node(interface=FmriRealign4d(), name='realign_nipy')
realign_nipy.inputs.tr = c.TR
realign_nipy.inputs.slice_order = c.SliceOrder
realign_nipy.inputs.time_interp=True
realign_nipy_no_t = pe.Node(interface=FmriRealign4d(), name='realign_nipy_no_t')
realign_nipy_no_t.inputs.tr = c.TR
#realign_nipy_no_t.inputs.slice_order = c.SliceOrder
realign_mflirt = pe.MapNode(interface=fsl.MCFLIRT(save_plots=True), name='mcflirt', iterfield=['in_file'])
realign_spm = pe.MapNode(interface=spm.Realign(), name='spm', iterfield=['in_files'])
report = pe.Node(interface=ReportSink(orderfields=['Introduction','Translations','Rotations','Correlation_Matrix']), name='write_report')
report.inputs.Introduction = 'Comparing realignment nodes'
report.inputs.base_directory= os.path.join(c.sink_dir)
report.inputs.report_name = 'Comparing_Motion'
rot = pe.MapNode(util.Function(input_names=['nipy1','nipy2','fsl','spm'],output_names=['fname'],function=plot_rot),name='plot_rot',
iterfield=['nipy1','nipy2','fsl','spm'])
trans = pe.MapNode(util.Function(input_names=['nipy1','nipy2','fsl','spm'],output_names=['fname'],function=plot_trans),name='plot_trans',
iterfield=['nipy1','nipy2','fsl','spm'])
coef = pe.MapNode(util.Function(input_names=['nipy1','nipy2','fsl','spm'],output_names=['fname'],function=corr_mat),name='cor_mat',
iterfield=['nipy1','nipy2','fsl','spm'])
workflow.connect(datagrabber, 'func', realign_nipy, 'in_file')
workflow.connect(datagrabber, 'func', realign_nipy_no_t, 'in_file')
workflow.connect(datagrabber, 'func', realign_mflirt, 'in_file')
workflow.connect(datagrabber, 'func', realign_spm, 'in_files')
workflow.connect(realign_nipy, 'par_file', rot, 'nipy1')
workflow.connect(realign_nipy_no_t, 'par_file', rot, 'nipy2')
workflow.connect(realign_spm, 'realignment_parameters', rot, 'spm')
workflow.connect(realign_mflirt, 'par_file', rot, 'fsl')
workflow.connect(realign_nipy, 'par_file', trans, 'nipy1')
workflow.connect(realign_nipy_no_t, 'par_file', trans, 'nipy2')
workflow.connect(realign_spm, 'realignment_parameters', trans, 'spm')
workflow.connect(realign_mflirt, 'par_file', trans, 'fsl')
workflow.connect(realign_nipy, 'par_file', coef, 'nipy1')
workflow.connect(realign_nipy_no_t, 'par_file', coef, 'nipy2')
workflow.connect(realign_spm, 'realignment_parameters', coef, 'spm')
workflow.connect(realign_mflirt, 'par_file', coef, 'fsl')
workflow.connect(trans, 'fname', report, 'Translations')
workflow.connect(rot, 'fname', report, 'Rotations')
workflow.connect(coef, 'fname', report, 'Correlation_Matrix')
workflow.connect(infosource,'subject_id',report,'container')
return workflow
mwf.workflow_function = compare_workflow
"""
Part 5: Define the main function
"""
def main(config_file):
c = load_config(config_file, create_config)
compare = compare_workflow(c)
compare.base_dir = c.working_dir
if c.run_using_plugin:
compare.run(plugin=c.plugin, plugin_args=c.plugin_args)
else:
compare.run()
mwf.workflow_main_function = main
"""
Part 6: Register the Workflow
"""
register_workflow(mwf)
| 38.41637
| 141
| 0.668921
|
22baa4f819aa880990c6f3759110cfaf27f35664
| 1,735
|
py
|
Python
|
tests/test_maps.py
|
potsdam-language-vision-2021/eyebot-1.0
|
dd2f618a8357ef16c497d06b8b19ab38d628645a
|
[
"MIT"
] | null | null | null |
tests/test_maps.py
|
potsdam-language-vision-2021/eyebot-1.0
|
dd2f618a8357ef16c497d06b8b19ab38d628645a
|
[
"MIT"
] | 46
|
2021-04-30T15:28:40.000Z
|
2021-08-21T15:26:34.000Z
|
tests/test_maps.py
|
potsdam-language-vision-2021/eyebot-1.0
|
dd2f618a8357ef16c497d06b8b19ab38d628645a
|
[
"MIT"
] | 1
|
2021-08-07T19:41:12.000Z
|
2021-08-07T19:41:12.000Z
|
import unittest
from matplotlib import pyplot as plt
from avatar.mapworld.maps import ADEMap
class ADEMapTestCase(unittest.TestCase):
def test_print_mapping(self):
# Create a map with eight rooms on a four-by-four grid, with some room types allowed to repeat
map = ADEMap(n=4, m=4, n_rooms=8, types_to_repeat=[2, 2])
map.print_mapping()
def test_plot_graph(self):
map = ADEMap(n=4, m=3, n_rooms=10)
map.print_mapping()
map.plot_graph()
plt.show()
def test_to_json(self):
"""
For example:
{
"directed": false,
"multigraph": false,
"graph": {},
"nodes": [
{
"base_type": "outdoor",
"type": "c/casino/outdoor",
"target": false,
"instance": "c/casino/outdoor/ADE_train_00005214.jpg",
"id": [
3,
1
]
},
{
"base_type": "indoor",
"type": "h/hunting_lodge/indoor",
"target": false,
"instance": "h/hunting_lodge/indoor/ADE_train_00009734.jpg",
"id": [
2,
1
]
},
...
],
"links": [
{
"source": [
3,
1
],
"target": [
2,
1
]
},
...
]
}
"""
map = ADEMap(n=4, m=3, n_rooms=5)
map_json = map.to_json()
print(map_json)
if __name__ == '__main__':
unittest.main()
| 23.767123
| 83
| 0.402305
|
2fcee5e3ad0dcd6155ab38ed3c8dc6828116abd1
| 1,954
|
py
|
Python
|
crypto_compare/client.py
|
BoTreeConsultingTeam/crypto_compare
|
048ac3ef34ccfc5100ce5310318787fb222f55da
|
[
"MIT"
] | 28
|
2017-08-30T18:05:20.000Z
|
2022-03-31T10:28:38.000Z
|
crypto_compare/client.py
|
joe-wojniak/crypto_compare
|
048ac3ef34ccfc5100ce5310318787fb222f55da
|
[
"MIT"
] | 2
|
2017-12-25T22:11:07.000Z
|
2018-11-24T08:19:07.000Z
|
crypto_compare/client.py
|
joe-wojniak/crypto_compare
|
048ac3ef34ccfc5100ce5310318787fb222f55da
|
[
"MIT"
] | 9
|
2017-11-15T19:01:54.000Z
|
2021-06-19T11:04:27.000Z
|
class Client:
def __init__(self, apikey):
self.apikey = apikey
BASE_URL = 'https://min-api.cryptocompare.com'
COIN_LIST_URL = BASE_URL + '/data/all/coinlist'
COIN_SNAPSHOT_FULL_BY_ID_URL = 'https://www.cryptocompare.com/api/data/coinsnapshotfullbyid/?id='
COIN_SNAPSHOT_URL = BASE_URL + '/data/top/exchanges/full'
PRICE_URL = BASE_URL + '/data/price'
PRICE_MULTI_URL = BASE_URL + '/data/pricemulti'
PRICE_MULTI_FULL_URL = BASE_URL + '/data/pricemultifull'
PRICE_HISTORICAL_URL = BASE_URL + '/data/pricehistorical'
GENERATE_AVG_URL = BASE_URL + '/data/generateAvg'
DAY_AVG_URL = BASE_URL + '/data/dayAvg'
SUBS_WATCH_LIST_URL = BASE_URL + '/data/subsWatchlist'
SUBS_URL = BASE_URL + '/data/subs'
ALL_EXCHANGES_URL = BASE_URL + '/data/all/exchanges'
TOP_EXCHANGES_URL = BASE_URL + '/data/top/exchanges'
TOP_VOLUMES_URL = BASE_URL + '/data/top/volumes'
TOP_PAIRS_URL = BASE_URL + '/data/top/pairs'
HISTO_DAY_URL = BASE_URL + '/data/histoday'
HISTO_HOUR_URL = BASE_URL + '/data/histohour'
HISTO_MINUTE_URL = BASE_URL + '/data/histominute'
SOCIAL_STATS_URL = 'https://www.cryptocompare.com/api/data/socialstats?id='
MINING_CONTRACTS_URL = BASE_URL + '/data/mining/contracts/general'
MINING_EQUIPMENT_URL = BASE_URL + '/data/mining/equipment/general'
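# The functions imported below are bound inside the class body, so they become instance
# methods of Client (each one is expected to take `self` as its first argument).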
from .apis.coin import coin_list, coin_snapshot_full_by_id, coin_snapshot
from .apis.price import price, price_multi, price_multifull, price_historical
from .apis.average import generate_avg, day_avg
from .apis.subs import subs_watchlist, subs
from .apis.top import top_exchanges, top_volumes, top_pairs
from .apis.histo import histo_day, histo_hour, histo_minute
from .apis.mining import mining_contracts, mining_equipment
from .apis.uncategorized import all_exchanges, social_stats
from .apis.helper import _is_params_valid, _fetch_data, _get_querystring
| 41.574468
| 101
| 0.735415
|
7e86573567ad0520f8b3d4bb00da0491fa042ead
| 285
|
py
|
Python
|
packaging_example/2_and_now_we_are_really_growing/lab/computations/list_methods.py
|
sr105/python_packages_tutorial
|
e795ce1ff14463dd5c3da06d120a4e40da5ad732
|
[
"MIT"
] | 1
|
2021-06-11T17:47:39.000Z
|
2021-06-11T17:47:39.000Z
|
packaging_example/2_and_now_we_are_really_growing/lab/computations/list_methods.py
|
sr105/python_packages_tutorial
|
e795ce1ff14463dd5c3da06d120a4e40da5ad732
|
[
"MIT"
] | null | null | null |
packaging_example/2_and_now_we_are_really_growing/lab/computations/list_methods.py
|
sr105/python_packages_tutorial
|
e795ce1ff14463dd5c3da06d120a4e40da5ad732
|
[
"MIT"
] | null | null | null |
# Computations
def multiply_a_list_by(a_list, number):
"""Return a list with every item multiplied by number."""
return [item * number for item in a_list]
def square_a_list(a_list):
"""Return a list with every item squared."""
return [item * item for item in a_list]
| 28.5
| 61
| 0.701754
|
5acc822ed84d52be956b8af5c4d1f5bff394ab7c
| 7,486
|
py
|
Python
|
xo.py
|
Gooif/iiii-files
|
60096f948db416a05a8a038f8eb15c9d60ad4873
|
[
"MIT"
] | null | null | null |
xo.py
|
Gooif/iiii-files
|
60096f948db416a05a8a038f8eb15c9d60ad4873
|
[
"MIT"
] | null | null | null |
xo.py
|
Gooif/iiii-files
|
60096f948db416a05a8a038f8eb15c9d60ad4873
|
[
"MIT"
] | null | null | null |
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo, GPranks,IDrank
from utlis.send import send_msg, BYusers, GetLink,Name,Glang
from utlis.locks import st,getOR
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json
import importlib
from uuid import uuid4
from pyrogram import (
InlineQueryResultArticle, InputTextMessageContent, InlineKeyboardMarkup, InlineKeyboardButton
)
def updateMsgs(client, message,redis):
pass
def kbtotx(kb):
tx = ""
t = ""
i = 0
for n in kb:
if n == 0:
t += "◻️ "
if n == 1:
t += "❌ "
if n == 2:
t += "⭕️ "
i += 1
if i == 3:
tx += "\n"+t
t = ""
i = 0
return tx
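# getwin checks the 3x3 board (a flat list of 9 cells) against the eight winning lines
# (rows, columns and diagonals) and returns (win, xo), where xo is 1 for X, 2 for O,
# "tie" when the board is full, or "no" while the game is still running.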
def getwin(tb):
winners=((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))
win = False
xo = "no"
for ar in winners:
if tb[ar[0]] == 1 and tb[ar[1]] == 1 and tb[ar[2]] ==1:
win = True
xo = tb[ar[0]]
break
if tb[ar[0]] == 2 and tb[ar[1]] == 2 and tb[ar[2]] ==2:
win = True
xo = tb[ar[0]]
break
if win == False and 0 not in tb:
win = True
xo = "tie"
return win,xo
def get(client,userID,userFN,p1,p2):
if userID == int(p1):
fn1 = userFN
else:
try:
getUser = client.get_users(int(p1))
fn1 = getUser.first_name
except Exception as e:
fn1 = p1
if userID == int(p2):
fn2 = userFN
else:
try:
getUser = client.get_users(int(p2))
fn2 = getUser.first_name
except Exception as e:
fn2 = p2
return fn1,fn2
def updateCb(client, callback_query,redis):
if callback_query.inline_message_id:
return False
date = callback_query.data
userID = callback_query.from_user.id
userFN = callback_query.from_user.first_name
username = callback_query.from_user.username
chatID = callback_query.message.chat.id
message_id = callback_query.message.message_id
go = """{}꒐ ({}),❌
{}꒐ ({}),⭕️"""
go3 = """{}꒐ ({})
{}꒐ ({})
🎊꒐ الفائز ({})"""
go2 = """{}꒐ ({})
{}꒐ ({})
🔴꒐ تعادل"""
if re.search("rex=",date):
tx = callback_query.message.text
p1 = date.split("=")[1]
if userID == int(p1):
start = """👋🏻꒐ ❌⭕️
👤꒐ اضغط للعب مع ({})""".format(userFN)
kb = InlineKeyboardMarkup([[InlineKeyboardButton("العب", callback_data="xo="+str(userID))]])
Bot("sendMessage",{"chat_id":chatID,"text":start,"disable_web_page_preview":True,"reply_markup":kb})
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":tx,"disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":"عذراً اللعبه ليست لك","show_alert":True})
if re.search("^xo.pyplay$",date):
start = """👋🏻꒐ ❌⭕️
👤꒐ اضغط للعب مع ({})""".format(userFN)
kb = InlineKeyboardMarkup([[InlineKeyboardButton("العب", callback_data="xo="+str(userID))]])
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":start,"disable_web_page_preview":True,"reply_markup":kb})
if re.search("xo=",date):
p1 = date.split("=")[1]
if userID == int(p1):
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":"انت من بدأت اللعبه انتظر احد اصدقائك","show_alert":True})
return False
try:
getUser = client.get_users(p1)
fn1 = getUser.first_name
except Exception as e:
fn1 = p1
p2 = userID
fn2 = userFN
tb = [0,0,0,0,0,0,0,0,0]
cd = "xp{}={}={}={}".format(1,p1,p2,tb)
i = 0
x = 0
ar = []
a = []
em = "◻️"
while i < 3:
while x < 3:
cd = "xp{}={}={}={}={}.{}".format(1,p1,p2,tb,i,x)
a.append(InlineKeyboardButton(em,callback_data=cd))
x += 1
i += 1
x = 0
ar.append(a)
a = []
ar.append([InlineKeyboardButton("-",url="t.me/N00NN0")])
kb = InlineKeyboardMarkup(ar)
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":go.format("👉🏻",fn1,"🔄",fn2),"disable_web_page_preview":True,"reply_markup":kb})
if re.search("xp(1|2)=",date):
play = date.split("=")[0].replace("xp","")
p1 = date.split("=")[1]
p2 = date.split("=")[2]
ck = date.split("=")[3]
rt = date.split("=")[4]
r = rt.split(".")[0]
t = rt.split(".")[1]
if int(play) == 1:
playing = p1
nextT = p2
nextTN = 2
xo = 1#"X"
em = "❌"
elif int(play) == 2:
playing = p2
xo = 2#"O"
nextT = p1
nextTN = 1
em = "⭕️"
if userID != int(playing):
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":"انتظر دورك","show_alert":True})
return False
x = 0
a =[]
ar = []
for i in json.loads(ck):
a.append(i)
x += 1
if x == 3:
x = 0
ar.append(a)
a = []
tx = callback_query.message.reply_markup.inline_keyboard
if ar[int(r)][int(t)] == 0:
ar[int(r)][int(t)] = xo
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":"لايمكنك اللعب هنا","show_alert":True})
return False
tb = ar[0] + ar[1] + ar[2]
win,xRo = getwin(tb)
if win:
if xRo == 1:
fn1,fn2 = get(client,userID,userFN,p1,p2)
redis.hincrby("{}Nbot:{}:points".format(BOT_ID,chatID),p1,10)
kb = InlineKeyboardMarkup([[InlineKeyboardButton("اللعب مجدداً",callback_data="rex={}".format(p1))]])
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":go3.format("❌",fn1,"⭕️",fn2,fn1)+"\n"+kbtotx(tb),"disable_web_page_preview":True,"reply_markup":kb})
return False
if xRo == 2:
fn1,fn2 = get(client,userID,userFN,p1,p2)
redis.hincrby("{}Nbot:{}:points".format(BOT_ID,chatID),p2,10)
kb = InlineKeyboardMarkup([[InlineKeyboardButton("اللعب مجدداً",callback_data="rex={}".format(p1))]])
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":go3.format("❌",fn1,"⭕️",fn2,fn2)+"\n"+kbtotx(tb),"disable_web_page_preview":True,"reply_markup":kb})
return False
if xRo == "tie":
fn1,fn2 = get(client,userID,userFN,p1,p2)
redis.hincrby("{}Nbot:{}:points".format(BOT_ID,chatID),p1,3)
redis.hincrby("{}Nbot:{}:points".format(BOT_ID,chatID),p2,3)
kb = InlineKeyboardMarkup([[InlineKeyboardButton("اللعب مجدداً",callback_data="rex={}".format(p1))]])
Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":go2.format("❌",fn1,"⭕️",fn2)+"\n"+kbtotx(tb),"disable_web_page_preview":True,"reply_markup":kb})
return False
win = getwin(tb)
i = 0
x = 0
ar = []
a = []
while i < 3:
while x < 3:
cd = "xp{}={}={}={}={}.{}".format(nextTN,p1,p2,tb,i,x)
if int(r) == i and int(t) == x:
a.append(InlineKeyboardButton(em,callback_data=cd))
else:
a.append(InlineKeyboardButton(tx[i][x].text,callback_data=cd))
x += 1
i += 1
x = 0
ar.append(a)
a = []
ar.append([InlineKeyboardButton("📣",url="t.me/N00NN0")])
kb = InlineKeyboardMarkup(ar)
if nextTN == 1:
e1 = "👉🏻"
else:
e1 = "🔄"
if nextTN == 2:
e2 = "👉🏻"
else:
e2 = "🔄"
fn1,fn2 = get(client,userID,userFN,p1,p2)
v= Bot("editMessageText",{"chat_id":chatID,"message_id":message_id,"text":go.format(e1,fn1,e2,fn2),"disable_web_page_preview":True,"reply_markup":kb})
| 30.555102
| 179
| 0.583088
|
0f361c22ba07987763abd44e4b5ad4ac00e76f16
| 193
|
py
|
Python
|
ex051.py
|
honeyhugh/PythonCurso
|
e5b8efe04e100ea0b0c0aacde1caf7ae52489f40
|
[
"MIT"
] | null | null | null |
ex051.py
|
honeyhugh/PythonCurso
|
e5b8efe04e100ea0b0c0aacde1caf7ae52489f40
|
[
"MIT"
] | null | null | null |
ex051.py
|
honeyhugh/PythonCurso
|
e5b8efe04e100ea0b0c0aacde1caf7ae52489f40
|
[
"MIT"
] | null | null | null |
n1 = int(input('Digite o 1º termo: '))
r = int(input('Digite a razão: '))
print('Os dez primeiros termos dessa PA são:')
for c in range(n1, n1 + 10 * r, r):
print(c, end=', ')
print('fim')
| 27.571429
| 46
| 0.601036
|
83b441edcc83909a9ebd00af49ef8c901c05c222
| 4,849
|
py
|
Python
|
src/qt/qtwebkit/Tools/QueueStatusServer/model/attachment.py
|
viewdy/phantomjs
|
eddb0db1d253fd0c546060a4555554c8ee08c13c
|
[
"BSD-3-Clause"
] | 1
|
2015-05-27T13:52:20.000Z
|
2015-05-27T13:52:20.000Z
|
src/qt/qtwebkit/Tools/QueueStatusServer/model/attachment.py
|
mrampersad/phantomjs
|
dca6f77a36699eb4e1c46f7600cca618f01b0ac3
|
[
"BSD-3-Clause"
] | null | null | null |
src/qt/qtwebkit/Tools/QueueStatusServer/model/attachment.py
|
mrampersad/phantomjs
|
dca6f77a36699eb4e1c46f7600cca618f01b0ac3
|
[
"BSD-3-Clause"
] | 1
|
2017-03-19T13:03:23.000Z
|
2017-03-19T13:03:23.000Z
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from model.queues import Queue
from model.queuestatus import QueueStatus
from model.workitems import WorkItems
class Attachment(object):
@classmethod
def recent(cls, limit=1):
statuses = QueueStatus.all().order("-date")
# Notice that we use both a set and a list here to keep the -date ordering.
ids = []
visited_ids = set()
for status in statuses:
attachment_id = status.active_patch_id
if not attachment_id:
continue
if attachment_id in visited_ids:
continue
visited_ids.add(attachment_id)
ids.append(attachment_id)
if len(visited_ids) >= limit:
break
return map(cls, ids)
def __init__(self, attachment_id):
self.id = attachment_id
self._summary = None
self._cached_queue_positions = None
def summary(self):
if self._summary:
return self._summary
self._summary = self._fetch_summary()
return self._summary
def state_from_queue_status(self, status):
table = {
"Pass" : "pass",
"Fail" : "fail",
}
state = table.get(status.message)
if state:
return state
if status.message.startswith("Error:"):
return "error"
if status:
return "pending"
return None
def position_in_queue(self, queue):
return self._queue_positions().get(queue.name())
def status_for_queue(self, queue):
# summary() is a horrible API and should be killed.
queue_summary = self.summary().get(queue.name_with_underscores())
if not queue_summary:
return None
return queue_summary.get("status")
def bug_id(self):
return self.summary().get("bug_id")
def _queue_positions(self):
if self._cached_queue_positions:
return self._cached_queue_positions
# FIXME: Should we be mem-caching this?
self._cached_queue_positions = self._calculate_queue_positions()
return self._cached_queue_positions
def _calculate_queue_positions(self):
all_work_items = WorkItems.all().fetch(limit=len(Queue.all()))
return dict([(items.queue.name(), items.display_position_for_attachment(self.id)) for items in all_work_items])
# FIXME: This is controller/view code and does not belong in a model.
def _fetch_summary(self):
summary = { "attachment_id" : self.id }
first_status = QueueStatus.all().filter('active_patch_id =', self.id).get()
if not first_status:
# We don't have any record of this attachment.
return summary
summary["bug_id"] = first_status.active_bug_id
for queue in Queue.all():
summary[queue.name_with_underscores()] = None
status = QueueStatus.all().filter('queue_name =', queue.name()).filter('active_patch_id =', self.id).order('-date').get()
if status:
# summary() is a horrible API and should be killed.
summary[queue.name_with_underscores()] = {
"state": self.state_from_queue_status(status),
"status": status,
}
return summary
| 39.104839
| 133
| 0.66323
|
f8e3bb31903275953aa11986300ccc8f94aa8642
| 3,298
|
py
|
Python
|
src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | 5
|
2020-11-15T12:27:28.000Z
|
2021-09-20T03:50:54.000Z
|
src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | null | null | null |
src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py
|
georgia-tech-synergy-lab/gem5_astra
|
41695878a2b60c5a28fa104465558cd1acb8a695
|
[
"BSD-3-Clause"
] | 2
|
2020-10-27T01:15:41.000Z
|
2020-11-16T02:30:32.000Z
|
# Copyright (c) 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Tushar Krishna
from m5.objects.MemObject import MemObject
from m5.params import *
from m5.proxy import *
class GarnetSyntheticTraffic(MemObject):
type = 'GarnetSyntheticTraffic'
cxx_header = \
"cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh"
block_offset = Param.Int(6, "block offset in bits")
num_dest = Param.Int(1, "Number of Destinations")
#mycode
burst_length = Param.Int(1, "burst length")
burst_interval = Param.Int(1, "burst interval")
num_packages = Param.Int(1, "num_packages")
memory_size = Param.Int(65536, "memory size")
sim_cycles = Param.Int(1000, "Number of simulation cycles")
num_packets_max = Param.Int(-1, "Max number of packets to send. \
Default is to keep sending till simulation ends")
single_sender = Param.Int(-1, "Send only from this node. \
By default every node sends")
single_dest = Param.Int(-1, "Send only to this dest. \
Default depends on traffic_type")
traffic_type = Param.String("uniform_random", "Traffic type")
inj_rate = Param.Float(0.1, "Packet injection rate")
inj_vnet = Param.Int(-1, "Vnet to inject in. \
0 and 1 are 1-flit, 2 is 5-flit. \
Default is to inject in all three vnets")
precision = Param.Int(3, "Number of digits of precision \
after decimal point")
response_limit = Param.Cycles(5000000, "Cycles before exiting \
due to lack of progress")
test = MasterPort("Port to the memory system to test")
system = Param.System(Parent.any, "System we belong to")
| 52.349206
| 73
| 0.701941
|
dd8b050956ae03d5159d15cc8313e4f99d55b95f
| 2,445
|
py
|
Python
|
source/scripts/python/watzon/source/watzon.py
|
gargakshit/core
|
84868a3e3151088c68520f9db9235e03c0ac0d11
|
[
"Apache-2.0"
] | 928
|
2018-12-26T22:40:59.000Z
|
2022-03-31T12:17:43.000Z
|
source/scripts/python/watzon/source/watzon.py
|
gargakshit/core
|
84868a3e3151088c68520f9db9235e03c0ac0d11
|
[
"Apache-2.0"
] | 132
|
2019-03-01T21:01:17.000Z
|
2022-03-17T09:00:42.000Z
|
source/scripts/python/watzon/source/watzon.py
|
gargakshit/core
|
84868a3e3151088c68520f9db9235e03c0ac0d11
|
[
"Apache-2.0"
] | 112
|
2019-01-15T09:36:11.000Z
|
2022-03-12T06:39:01.000Z
|
import sys
import ast
import contextlib
from os import remove
from io import StringIO
async def python_eval(expression):
response, out = await async_eval(expression)
return [response, out]
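# async_eval compiles the user code into a temporary async function via the ast module so that
# `await` works at the top level; if the last statement is an expression it is rewritten into a
# return statement, and stdout is captured through temp_stdio() defined below.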
async def async_eval(code, **kwargs):
# Note to self: please don't set globals here as they will be lost.
# Don't clutter locals
locs = {}
# Restore globals later
globs = globals().copy()
# This code saves __name__ and __package into a kwarg passed to the function.
# It is set before the users code runs to make sure relative imports work
global_args = "_globs"
while global_args in globs.keys():
# Make sure there's no name collision, just keep prepending _s
global_args = "_" + global_args
kwargs[global_args] = {}
for glob in ["__name__", "__package__"]:
# Copy data to args we are sending
kwargs[global_args][glob] = globs[glob]
root = ast.parse(code, 'exec')
code = root.body
# If we can use it as a lambda return (but multiline)
if isinstance(code[-1], ast.Expr):
# Change it to a return statement
code[-1] = ast.copy_location(ast.Return(code[-1].value), code[-1])
# globals().update(**<global_args>)
glob_copy = ast.Expr(ast.Call(func=ast.Attribute(value=ast.Call(func=ast.Name(id='globals', ctx=ast.Load()),
args=[], keywords=[]),
attr='update', ctx=ast.Load()),
args=[], keywords=[ast.keyword(arg=None,
value=ast.Name(id=global_args, ctx=ast.Load()))]))
glob_copy.lineno = 0
glob_copy.col_offset = 0
ast.fix_missing_locations(glob_copy)
code.insert(0, glob_copy)
args = []
for a in list(map(lambda x: ast.arg(x, None), kwargs.keys())):
a.lineno = 0
a.col_offset = 0
args += [a]
fun = ast.AsyncFunctionDef('tmp', ast.arguments(
args=[],
vararg=None,
kwonlyargs=args,
posonlyargs=[],
kwarg=None,
defaults=[],
kw_defaults=[None for i in range(len(args))]), code, [], None
)
fun.lineno = 0
fun.col_offset = 0
mod = ast.Module([fun], type_ignores=[])
comp = compile(mod, '<string>', 'exec')
exec(comp, {}, locs)
with temp_stdio() as out:
result = await locs["tmp"](**kwargs)
try:
globals().clear()
# Inconsistent state
finally:
globals().update(**globs)
return result, out.getvalue()
@contextlib.contextmanager
def temp_stdio(stdout=None, stderr=None):
"""Create a temporary STDIO for capturing results"""
old_out = sys.stdout
if stdout is None:
stdout = StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old_out
| 29.817073
| 109
| 0.676483
|
6de62933c02352bdcc55a94d71ec1bbd55915fa4
| 1,774
|
py
|
Python
|
src/constants.py
|
shigashiyama/seikanlp
|
fa052bf4062a1be01458f03fe11c833f887cc127
|
[
"MIT"
] | 4
|
2019-08-14T10:51:51.000Z
|
2021-03-18T07:53:20.000Z
|
src/constants.py
|
shigashiyama/seikanlp
|
fa052bf4062a1be01458f03fe11c833f887cc127
|
[
"MIT"
] | null | null | null |
src/constants.py
|
shigashiyama/seikanlp
|
fa052bf4062a1be01458f03fe11c833f887cc127
|
[
"MIT"
] | null | null | null |
import numpy as np
__version__ = 'v0.1.0'
### task
TASK_SEG = 'seg'
TASK_SEGTAG = 'segtag'
TASK_TAG = 'tag'
TASK_DEP = 'dep'
TASK_TDEP = 'tdep'
TASK_MTMD_SEG = 'mtmd_seg'
TASK_MTMD_SEGTAG = 'mtmd_segtag'
TASK_MTMD_TAG = 'mtmd_tag'
### for analyzer
LOG_DIR = 'log'
MODEL_DIR = 'models/main'
### for data io
PADDING_LABEL = -1
NUM_FOR_REPORTING = 100000
SL_COLUMN_DELIM = '\t'
SL_TOKEN_DELIM = ' '
SL_ATTR_DELIM = '_'
WL_TOKEN_DELIM = '\n'
WL_ATTR_DELIM = '\t'
KEY_VALUE_DELIM = '='
SUBATTR_SEPARATOR = '-'
COMMENT_SYM = '#'
ATTR_INFO_DELIM = ','
ATTR_INFO_DELIM2 = ':'
ATTR_INFO_DELIM3 = '_'
SL_FORMAT = 'sl'
WL_FORMAT = 'wl'
DEFAULT_LENGTH_LIMIT = 6
### for dictionary
UNK_SYMBOL = '<UNK>'
NUM_SYMBOL = '<NUM>'
NONE_SYMBOL = '<NONE>'
ROOT_SYMBOL = '<ROOT>'
CHUNK = 'chunk'
UNIGRAM = 'unigram'
BIGRAM = 'bigram'
SEG_LABEL = 'seg_label'
ARC_LABEL = 'arc_label'
ATTR_LABEL = 'attr{}_label'.format
DOMAIN = 'domain'
TYPE_HIRA = '<HR>'
TYPE_KATA = '<KT>'
TYPE_LONG = '<LG>'
TYPE_KANJI = '<KJ>'
TYPE_ALPHA = '<AL>'
TYPE_DIGIT = '<DG>'
TYPE_SPACE = '<SC>'
TYPE_SYMBOL = '<SY>'
TYPE_ASCII_OTHER = '<AO>'
BOS = '<B>' # unused
EOS = '<E>'
SEG_LABELS = 'BIES'
B = 'B'
I = 'I'
E = 'E'
S = 'S'
O = 'O'
### for hybrid char+word segmentation
CON='CON'
WCON='WCON'
AVG='AVG'
WAVG='WAVG'
### for parsing
NO_PARENTS_ID = -2 # indicate that parent of the word is nothing
UNK_PARENT_ID = -3 # indicate that parent of the word is unknown
### for feature extraction
# example of expected input: 'seg:L:2-3,4-5,6-10'
FEAT_TEMP_DELIM = ':'
FEAT_TEMP_RANGE_DELIM = ','
FEAT_TEMP_RANGE_SYM = '-'
L = 'L'
R = 'R'
| 17.74
| 70
| 0.609921
|
46de4ec280f94390bef59a94185ef204378d19d3
| 479
|
py
|
Python
|
tweepy_client.py
|
thecodepixi/hug-bot
|
9ac20ea13b482522a8b7a37d3a6a3623a09f6319
|
[
"MIT"
] | 15
|
2020-02-26T17:26:40.000Z
|
2021-05-05T19:51:52.000Z
|
tweepy_client.py
|
thecodepixi/hug-bot
|
9ac20ea13b482522a8b7a37d3a6a3623a09f6319
|
[
"MIT"
] | 8
|
2020-02-23T04:38:18.000Z
|
2020-12-29T00:40:07.000Z
|
tweepy_client.py
|
thecodepixi/hug-bot
|
9ac20ea13b482522a8b7a37d3a6a3623a09f6319
|
[
"MIT"
] | 4
|
2020-02-27T00:20:04.000Z
|
2020-12-29T00:24:26.000Z
|
from os import environ
import tweepy
from statuses import STATUSES
def twitter_api():
twitter_api_key = environ['twitter_api_key']
twitter_api_secret = environ['twitter_api_secret']
twitter_access_token = environ['twitter_access_token']
twitter_access_secret = environ['twitter_access_secret']
auth = tweepy.OAuthHandler(twitter_api_key, twitter_api_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
return tweepy.API(auth)
| 34.214286
| 70
| 0.797495
|
1afe5666801a5ebac0d33fe7067a25d3165c1447
| 1,014
|
py
|
Python
|
main.py
|
vantage-ola/youtubedl-appDownloader
|
e54ff787e741e3056f130bbbc638eada17e6f586
|
[
"MIT"
] | 1
|
2021-08-19T22:06:30.000Z
|
2021-08-19T22:06:30.000Z
|
main.py
|
vantage-ola/youtubedl-appDownloader
|
e54ff787e741e3056f130bbbc638eada17e6f586
|
[
"MIT"
] | null | null | null |
main.py
|
vantage-ola/youtubedl-appDownloader
|
e54ff787e741e3056f130bbbc638eada17e6f586
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.uix.recycleview import RecycleView
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty, NumericProperty, StringProperty
from kivy.core.window import Window
from kivy.utils import get_color_from_hex
from download import checkOnlineActivity, checkValidUrl, videoQuality,videoOptionDict,downloadThread
#BackGround Colour
Window.clearcolor = get_color_from_hex('#101216')
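# Note: the widget layouts for RV and AddDownloadForm are presumably defined in the
# accompanying .kv file that Kivy auto-loads for the youtubeDownload app.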
class RV(RecycleView):
pass
class AddDownloadForm(BoxLayout):
video_link = ObjectProperty()
def checkVideoLink(self):
#https://www.youtube.com/watch?v=tPEE9ZwTmy0
checkVideoLink=checkValidUrl(url=self.video_link.text)
return checkVideoLink
def start_download(self):
ydl_opts = videoOptionDict()
url=self.video_link.text
download=downloadThread(url ,ydl_opts)
download.start()
class youtubeDownload(App):
def build(self):
return
if __name__ == '__main__':
youtubeDownload().run()
| 26.684211
| 101
| 0.751479
|
0ad5c5b70cbbc78b4d9e9e42c825d0eb72e55df8
| 2,369
|
py
|
Python
|
data/p4VQE/R2/benchmark/startQiskit_noisy20.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_noisy20.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R2/benchmark/startQiskit_noisy20.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=8
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
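# make_circuit builds a small QAOA-style circuit: a layer of Hadamards (plus an rx rotation),
# then, for every weighted edge in the module-level graph E, a controlled-phase/phase block
# using `gamma`, an rx mixer layer using `beta`, and finally a pair of cx gates.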
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.rx(2.7457519792374794,input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=6
prog.cx(input_qubit[1],input_qubit[0]) # number=7
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_noisy20.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 26.920455
| 118
| 0.633179
|
a5b27dc483a0aed22f6bc47c97d1cea8641fb627
| 3,162
|
py
|
Python
|
k-means/loadata.py
|
ysx001/IIC
|
e72eb0833785e867ded0a9bac47ce1d1f9f47b4b
|
[
"MIT"
] | null | null | null |
k-means/loadata.py
|
ysx001/IIC
|
e72eb0833785e867ded0a9bac47ce1d1f9f47b4b
|
[
"MIT"
] | null | null | null |
k-means/loadata.py
|
ysx001/IIC
|
e72eb0833785e867ded0a9bac47ce1d1f9f47b4b
|
[
"MIT"
] | null | null | null |
#!python
#!/usr/bin/env python
from scipy.io import loadmat
from glob import glob
import os.path as osp
root = '/home/sarah/DiffSeg-Data/'
subjects = sorted(glob(osp.join(root, 'mwu*')))
print(len(subjects))
# %% Write the labels actually in data to labels.csv
# subjects = sorted(glob(osp.join(root, 'mwu*')))
# actual_labels = {} # key: labels, value: counts
# for subject_id in subjects:
# image_mat = loadmat(osp.join(root, subject_id, "data.mat"))
# for s in range(image_mat['segs'].shape[2]):
# label = image_mat['segs'][:, :, s, 1]
# for i in range(len(label)):
# for j in range(len(label[0])):
# if label[i, j] not in actual_labels:
# actual_labels[label[i, j]] = 1
# else:
# actual_labels[label[i, j]] += 1
# import csv
# w = csv.writer(open(osp.join(root, "labels.csv"), "w"))
# for key, val in actual_labels.items():
# w.writerow([key, val])
# print(len(actual_labels))
# print(actual_labels)
# %% Read the names of the labels and write them together with counts
# label_names = {}
# count = 0
# with open("/home/sarah/IIC/code/datasets/segmentation/FreeSurferColorLUT.txt") as f:
# for line in f:
# vals = line.split()
# if len(vals) > 2 and vals[0].isdigit():
# count+=1
# label_names[vals[0]] = vals[1]
# print (count)
# import csv
# w = csv.writer(open(osp.join(root, "labelNameCount.csv"), "w"))
# index = 0
# with open(osp.join(root, "labels.csv")) as label_counts:
# reader = csv.reader(label_counts)
# for rows in reader:
# label = rows[0]
# count = rows[1]
# name = label_names[label]
# w.writerow([label, index, count, name])
# index += 1
# %% Read the names of the labels and write them together with counts while combining based on category
label_names = {}
count = 0
with open("/home/sarah/IIC/code/datasets/segmentation/FreeSurferColorLUT.txt") as f:
for line in f:
vals = line.split()
if len(vals) > 2 and vals[0].isdigit():
count+=1
label_names[vals[0]] = vals[1]
print (count)
import csv
w = csv.writer(open(osp.join(root, "combinedLabels.csv"), "w"))
index = 0
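# wm is presumably the set of white-matter related FreeSurfer label IDs (plus 251-255 for the
# corpus callosum); note that it is not yet used in the loop below.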
wm = [2, 41, 77, 7, 46]
wm.extend(range(251, 256))
with open(osp.join(root, "labels.csv")) as label_counts:
reader = csv.reader(label_counts)
for rows in reader:
label = rows[0]
count = rows[1]
name = label_names[label]
w.writerow([label, index, count, name])
index += 1
# import matplotlib.pyplot as plt
# f, axarr = plt.subplots(3,2)
# # plt.show()
# print(x['imgs'][:, :, slide, 1].min(), x['imgs'][:, :, slide, 1].max())
# axarr[0,0].imshow(x['imgs'][:, :, slide, 0])
# axarr[0,1].imshow(x['imgs'][:, :, slide, 1])
# axarr[1,0].imshow(x['imgs'][:, :, slide, 2])
# axarr[1,1].imshow(x['imgs'][:, :, slide, 3])
# # axarr[2,0].imshow(x['segs'][:, :, slide, 0], cmap='plasma', vmin=0, vmax=77)
# axarr[2,0].imshow(x['segs'][:, :, slide, 1], cmap='plasma', vmin=0, vmax=2033)
# axarr[2,1].imshow(label, cmap='plasma')
# # plt.colorbar()
# plt.show()
# %%
| 30.114286
| 103
| 0.590765
|
f927bde48619c19bf9acad8505dafa89dab1736c
| 2,670
|
py
|
Python
|
tests/test_multidb_migrate.py
|
Nibblecomm/Customersite-Migrate
|
499816364e9256a70e794a6aadf0170bc8cbe5ae
|
[
"MIT"
] | null | null | null |
tests/test_multidb_migrate.py
|
Nibblecomm/Customersite-Migrate
|
499816364e9256a70e794a6aadf0170bc8cbe5ae
|
[
"MIT"
] | null | null | null |
tests/test_multidb_migrate.py
|
Nibblecomm/Customersite-Migrate
|
499816364e9256a70e794a6aadf0170bc8cbe5ae
|
[
"MIT"
] | null | null | null |
import os
import shutil
import unittest
import subprocess
import shlex
import sqlite3
def run_cmd(cmd):
"""Run a command and return a tuple with (stdout, stderr, exit_code)"""
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
return stdout, stderr, process.wait()
class TestMigrate(unittest.TestCase):
def setUp(self):
os.chdir(os.path.split(os.path.abspath(__file__))[0])
try:
os.remove('app1.db')
os.remove('app2.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
def tearDown(self):
try:
os.remove('app1.db')
os.remove('app2.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
def test_multidb_migrate_upgrade(self):
(o, e, s) = run_cmd('python app_multidb.py db init --multidb')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('python app_multidb.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd('python app_multidb.py db upgrade')
self.assertTrue(s == 0)
# ensure the tables are in the correct databases
conn1 = sqlite3.connect('app1.db')
c = conn1.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn1.close()
self.assertEqual(tables, [('alembic_version',), ('user',)])
conn2 = sqlite3.connect('app2.db')
c = conn2.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn2.close()
self.assertEqual(tables, [('alembic_version',), ('group',)])
# ensure the databases can be written to
from .app_multidb import db, User, Group
db.session.add(User(name='test'))
db.session.add(Group(name='group'))
db.session.commit()
# ensure the downgrade works
(o, e, s) = run_cmd('python app_multidb.py db downgrade')
self.assertTrue(s == 0)
conn1 = sqlite3.connect('app1.db')
c = conn1.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn1.close()
self.assertEqual(tables, [('alembic_version',)])
conn2 = sqlite3.connect('app2.db')
c = conn2.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn2.close()
self.assertEqual(tables, [('alembic_version',)])
| 30.689655
| 75
| 0.576779
|
9c75c2f08da55fb9751732d448756e8c516ebf86
| 20,467
|
py
|
Python
|
printer/Adafruit_Thermal.py
|
plane000/IOT-recept-printer
|
b7f9269c6d7c943aed626a4eb31223ed91a9d0d8
|
[
"MIT"
] | null | null | null |
printer/Adafruit_Thermal.py
|
plane000/IOT-recept-printer
|
b7f9269c6d7c943aed626a4eb31223ed91a9d0d8
|
[
"MIT"
] | null | null | null |
printer/Adafruit_Thermal.py
|
plane000/IOT-recept-printer
|
b7f9269c6d7c943aed626a4eb31223ed91a9d0d8
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from serial import Serial
import time
import sys
class Adafruit_Thermal(Serial):
resumeTime = 0.0
byteTime = 0.0
dotPrintTime = 0.0
dotFeedTime = 0.0
prevByte = '\n'
column = 0
maxColumn = 32
charHeight = 120
lineSpacing = 8
barcodeHeight = 50
printMode = 0
defaultHeatTime = 120
firmwareVersion = 268
writeToStdout = False
def __init__(self, *args, **kwargs):
# NEW BEHAVIOR: if no parameters given, output is written
# to stdout, to be piped through 'lp -o raw' (old behavior
# was to use default port & baud rate).
baudrate = 9600
if len(args) == 0:
self.writeToStdout = True
if len(args) == 1:
# If only port is passed, use default baud rate.
args = [ args[0], baudrate ]
elif len(args) == 2:
# If both passed, use those values.
baudrate = args[1]
# Firmware is assumed version 2.68. Can override this
# with the 'firmware=X' argument, where X is the major
# version number * 100 + the minor version number (e.g.
# pass "firmware=264" for version 2.64.
self.firmwareVersion = kwargs.get('firmware', 268)
if self.writeToStdout is False:
# Calculate time to issue one byte to the printer.
# 11 bits (not 8) to accommodate idle, start and
# stop bits. Idle time might be unnecessary, but
# erring on side of caution here.
self.byteTime = 11.0 / float(baudrate)
Serial.__init__(self, *args, **kwargs)
# Remainder of this method was previously in begin()
# The printer can't start receiving data immediately
# upon power up -- it needs a moment to cold boot
# and initialize. Allow at least 1/2 sec of uptime
# before printer can receive data.
self.timeoutSet(0.5)
self.wake()
self.reset()
# Description of print settings from p. 23 of manual:
# ESC 7 n1 n2 n3 Setting Control Parameter Command
# Decimal: 27 55 n1 n2 n3
# max heating dots, heating time, heating interval
# n1 = 0-255 Max heat dots, Unit (8dots), Default: 7 (64 dots)
# n2 = 3-255 Heating time, Unit (10us), Default: 80 (800us)
# n3 = 0-255 Heating interval, Unit (10us), Default: 2 (20us)
# The more max heating dots, the more peak current
# will cost when printing, the faster printing speed.
# The max heating dots is 8*(n1+1). The more heating
# time, the more density, but the slower printing
# speed. If heating time is too short, blank page
# may occur. The more heating interval, the more
# clear, but the slower printing speed.
heatTime = kwargs.get('heattime', self.defaultHeatTime)
self.writeBytes(
27, # Esc
55, # 7 (print settings)
11, # Heat dots
heatTime, # Lib default
40) # Heat interval
# Description of print density from p. 23 of manual:
# DC2 # n Set printing density
# Decimal: 18 35 n
# D4..D0 of n is used to set the printing density.
# Density is 50% + 5% * n(D4-D0) printing density.
# D7..D5 of n is used to set the printing break time.
# Break time is n(D7-D5)*250us.
# (Unsure of default values -- not documented)
printDensity = 10 # 100%
printBreakTime = 2 # 500 uS
self.writeBytes(
18, # DC2
35, # Print density
(printBreakTime << 5) | printDensity)
self.dotPrintTime = 0.03
self.dotFeedTime = 0.0021
else:
self.reset() # Inits some vars
def end(self):
self.close()
# Because there's no flow control between the printer and computer,
# special care must be taken to avoid overrunning the printer's
# buffer. Serial output is throttled based on serial speed as well
# as an estimate of the device's print and feed rates (relatively
# slow, being bound to moving parts and physical reality). After
# an operation is issued to the printer (e.g. bitmap print), a
# timeout is set before which any other printer operations will be
# suspended. This is generally more efficient than using a delay
# in that it allows the calling code to continue with other duties
# (e.g. receiving or decoding an image) while the printer
# physically completes the task.
# Sets estimated completion time for a just-issued task.
def timeoutSet(self, x):
self.resumeTime = time.time() + x
# Waits (if necessary) for the prior task to complete.
def timeoutWait(self):
if self.writeToStdout is False:
while (time.time() - self.resumeTime) < 0: pass
# Printer performance may vary based on the power supply voltage,
# thickness of paper, phase of the moon and other seemingly random
# variables. This method sets the times (in microseconds) for the
# paper to advance one vertical 'dot' when printing and feeding.
# For example, in the default initialized state, normal-sized text
# is 24 dots tall and the line spacing is 32 dots, so the time for
# one line to be issued is approximately 24 * print time + 8 * feed
# time. The default print and feed times are based on a random
# test unit, but as stated above your reality may be influenced by
# many factors. This lets you tweak the timing to avoid excessive
# delays and/or overrunning the printer buffer.
def setTimes(self, p, f):
# Units are in microseconds for
# compatibility with Arduino library
self.dotPrintTime = p / 1000000.0
self.dotFeedTime = f / 1000000.0
# 'Raw' byte-writing method
def writeBytes(self, *args):
if self.writeToStdout:
for arg in args:
sys.stdout.write(chr(arg))
else:
self.timeoutWait()
self.timeoutSet(len(args) * self.byteTime)
for arg in args:
try:
super(Adafruit_Thermal, self).write(bytes([arg])) # raw byte, valid for 0-255
except Exception as e:
print(e)
# Override write() method to keep track of paper feed.
def write(self, *data):
for i in range(len(data)):
c = data[i]
if self.writeToStdout:
sys.stdout.write(c)
continue
if c != 0x13:
self.timeoutWait()
super(Adafruit_Thermal, self).write(c.encode())
d = self.byteTime
if ((c == '\n') or
(self.column == self.maxColumn)):
# Newline or wrap
if self.prevByte == '\n':
# Feed line (blank)
d += ((self.charHeight +
self.lineSpacing) *
self.dotFeedTime)
else:
# Text line
d += ((self.charHeight *
self.dotPrintTime) +
(self.lineSpacing *
self.dotFeedTime))
self.column = 0
# Treat wrap as newline
# on next pass
c = '\n'
else:
self.column += 1
self.timeoutSet(d)
self.prevByte = c
# The bulk of this method was moved into __init__,
# but this is left here for compatibility with older
# code that might get ported directly from Arduino.
def begin(self, heatTime=defaultHeatTime):
self.writeBytes(
27, # Esc
55, # 7 (print settings)
11, # Heat dots
heatTime,
40) # Heat interval
def reset(self):
self.writeBytes(27, 64) # Esc @ = init command
self.prevByte = '\n' # Treat as if prior line is blank
self.column = 0
self.maxColumn = 32
self.charHeight = 24
self.lineSpacing = 6
self.barcodeHeight = 50
if self.firmwareVersion >= 264:
# Configure tab stops on recent printers
self.writeBytes(27, 68) # Set tab stops
self.writeBytes( 4, 8, 12, 16) # every 4 columns,
self.writeBytes(20, 24, 28, 0) # 0 is end-of-list.
# Reset text formatting parameters.
def setDefault(self):
self.online()
self.justify('L')
self.inverseOff()
self.doubleHeightOff()
self.setLineHeight(30)
self.boldOff()
self.underlineOff()
self.setBarcodeHeight(50)
self.setSize('s')
self.setCharset()
self.setCodePage()
def test(self):
self.write("Hello world!")
self.feed(2)
def testPage(self):
self.writeBytes(18, 84)
self.timeoutSet(
self.dotPrintTime * 24 * 26 +
self.dotFeedTime * (6 * 26 + 30))
def setBarcodeHeight(self, val=50):
if val < 1: val = 1
self.barcodeHeight = val
self.writeBytes(29, 104, val)
UPC_A = 0
UPC_E = 1
EAN13 = 2
EAN8 = 3
CODE39 = 4
I25 = 5
CODEBAR = 6
CODE93 = 7
CODE128 = 8
CODE11 = 9
MSI = 10
ITF = 11
CODABAR = 12
def printBarcode(self, text, type):
newDict = { # UPC codes & values for firmwareVersion >= 264
self.UPC_A : 65,
self.UPC_E : 66,
self.EAN13 : 67,
self.EAN8 : 68,
self.CODE39 : 69,
self.ITF : 70,
self.CODABAR : 71,
self.CODE93 : 72,
self.CODE128 : 73,
self.I25 : -1, # NOT IN NEW FIRMWARE
self.CODEBAR : -1,
self.CODE11 : -1,
self.MSI : -1
}
oldDict = { # UPC codes & values for firmwareVersion < 264
self.UPC_A : 0,
self.UPC_E : 1,
self.EAN13 : 2,
self.EAN8 : 3,
self.CODE39 : 4,
self.I25 : 5,
self.CODEBAR : 6,
self.CODE93 : 7,
self.CODE128 : 8,
self.CODE11 : 9,
self.MSI : 10,
self.ITF : -1, # NOT IN OLD FIRMWARE
self.CODABAR : -1
}
if self.firmwareVersion >= 264:
n = newDict[type]
else:
n = oldDict[type]
if n == -1: return
self.feed(1) # Recent firmware requires this?
self.writeBytes(
29, 72, 2, # Print label below barcode
29, 119, 3, # Barcode width
29, 107, n) # Barcode type
self.timeoutWait()
self.timeoutSet((self.barcodeHeight + 40) * self.dotPrintTime)
# Print string
if self.firmwareVersion >= 264:
# Recent firmware: write length byte + string sans NUL
n = len(text)
if n > 255: n = 255
if self.writeToStdout:
sys.stdout.write(chr(n))
for i in range(n):
sys.stdout.write(text[i])
else:
super(Adafruit_Thermal, self).write(chr(n).encode())
for i in range(n):
super(Adafruit_Thermal,
self).write(text[i].encode())
else:
# Older firmware: write string + NUL
if self.writeToStdout:
sys.stdout.write(text)
else:
super(Adafruit_Thermal, self).write(text.encode())
self.prevByte = '\n'
# === Character commands ===
INVERSE_MASK = (1 << 1) # Not in 2.6.8 firmware (see inverseOn())
UPDOWN_MASK = (1 << 2)
BOLD_MASK = (1 << 3)
DOUBLE_HEIGHT_MASK = (1 << 4)
DOUBLE_WIDTH_MASK = (1 << 5)
STRIKE_MASK = (1 << 6)
def setPrintMode(self, mask):
self.printMode |= mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def unsetPrintMode(self, mask):
self.printMode &= ~mask
self.writePrintMode()
if self.printMode & self.DOUBLE_HEIGHT_MASK:
self.charHeight = 48
else:
self.charHeight = 24
if self.printMode & self.DOUBLE_WIDTH_MASK:
self.maxColumn = 16
else:
self.maxColumn = 32
def writePrintMode(self):
self.writeBytes(27, 33, self.printMode)
def normal(self):
self.printMode = 0
self.writePrintMode()
def inverseOn(self):
if self.firmwareVersion >= 268:
self.writeBytes(29, 66, 1)
else:
self.setPrintMode(self.INVERSE_MASK)
def inverseOff(self):
if self.firmwareVersion >= 268:
self.writeBytes(29, 66, 0)
else:
self.unsetPrintMode(self.INVERSE_MASK)
def upsideDownOn(self):
self.setPrintMode(self.UPDOWN_MASK)
def upsideDownOff(self):
self.unsetPrintMode(self.UPDOWN_MASK)
def doubleHeightOn(self):
self.setPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleHeightOff(self):
self.unsetPrintMode(self.DOUBLE_HEIGHT_MASK)
def doubleWidthOn(self):
self.setPrintMode(self.DOUBLE_WIDTH_MASK)
def doubleWidthOff(self):
self.unsetPrintMode(self.DOUBLE_WIDTH_MASK)
def strikeOn(self):
self.setPrintMode(self.STRIKE_MASK)
def strikeOff(self):
self.unsetPrintMode(self.STRIKE_MASK)
def boldOn(self):
self.setPrintMode(self.BOLD_MASK)
def boldOff(self):
self.unsetPrintMode(self.BOLD_MASK)
def justify(self, value):
c = value.upper()
if c == 'C':
pos = 1
elif c == 'R':
pos = 2
else:
pos = 0
self.writeBytes(0x1B, 0x61, pos)
# Feeds by the specified number of lines
def feed(self, x=1):
if self.firmwareVersion >= 264:
self.writeBytes(27, 100, x)
self.timeoutSet(self.dotFeedTime * self.charHeight)
self.prevByte = '\n'
self.column = 0
else:
# datasheet claims sending bytes 27, 100, <x> works,
# but it feeds much more than that. So, manually:
while x > 0:
self.write('\n')
x -= 1
# Feeds by the specified number of individual pixel rows
def feedRows(self, rows):
self.writeBytes(27, 74, rows)
self.timeoutSet(rows * self.dotFeedTime)
self.prevByte = '\n'
self.column = 0
def flush(self):
self.writeBytes(12) # ASCII FF
def setSize(self, value):
c = value.upper()
if c == 'L': # Large: double width and height
size = 0x11
self.charHeight = 48
self.maxColumn = 16
elif c == 'M': # Medium: double height
size = 0x01
self.charHeight = 48
self.maxColumn = 32
else: # Small: standard width and height
size = 0x00
self.charHeight = 24
self.maxColumn = 32
self.writeBytes(29, 33, size)
self.prevByte = '\n' # Setting the size adds a linefeed
# Underlines of different weights can be produced:
# 0 - no underline
# 1 - normal underline
# 2 - thick underline
def underlineOn(self, weight=1):
if weight > 2: weight = 2
self.writeBytes(27, 45, weight)
def underlineOff(self):
self.writeBytes(27, 45, 0)
def printBitmap(self, w, h, bitmap, LaaT=False):
rowBytes = (w + 7) // 8 # Round up to next byte boundary (integer math)
if rowBytes >= 48:
rowBytesClipped = 48 # 384 pixels max width
else:
rowBytesClipped = rowBytes
# if LaaT (line-at-a-time) is True, print bitmaps
# scanline-at-a-time (rather than in chunks).
# This tends to make for much cleaner printing
# (no feed gaps) on large images...but has the
# opposite effect on small images that would fit
# in a single 'chunk', so use carefully!
if LaaT: maxChunkHeight = 1
else: maxChunkHeight = 255
i = 0
for rowStart in range(0, h, maxChunkHeight):
chunkHeight = h - rowStart
if chunkHeight > maxChunkHeight:
chunkHeight = maxChunkHeight
# Timeout wait happens here
self.writeBytes(18, 42, chunkHeight, rowBytesClipped)
for y in range(chunkHeight):
for x in range(rowBytesClipped):
if self.writeToStdout:
sys.stdout.write(
chr(bitmap[i]))
else:
super(Adafruit_Thermal,
self).write(bytes([bitmap[i]]))
i += 1
i += rowBytes - rowBytesClipped
self.timeoutSet(chunkHeight * self.dotPrintTime)
self.prevByte = '\n'
# Print Image. Requires Python Imaging Library. This is
# specific to the Python port and not present in the Arduino
# library. Image will be cropped to 384 pixels width if
# necessary, and converted to 1-bit w/diffusion dithering.
# For any other behavior (scale, B&W threshold, etc.), use
# the Imaging Library to perform such operations before
# passing the result to this function.
def printImage(self, image, LaaT=False):
from PIL import Image
if image.mode != '1':
image = image.convert('1')
width = image.size[0]
height = image.size[1]
if width > 384:
width = 384
rowBytes = (width + 7) // 8
bitmap = bytearray(rowBytes * height)
pixels = image.load()
for y in range(height):
n = y * rowBytes
x = 0
for b in range(rowBytes):
sum = 0
bit = 128
while bit > 0:
if x >= width: break
if pixels[x, y] == 0:
sum |= bit
x += 1
bit >>= 1
bitmap[n + b] = sum
self.printBitmap(width, height, bitmap, LaaT)
# Take the printer offline. Print commands sent after this
# will be ignored until 'online' is called.
def offline(self):
self.writeBytes(27, 61, 0)
# Take the printer online. Subsequent print commands will be obeyed.
def online(self):
self.writeBytes(27, 61, 1)
# Put the printer into a low-energy state immediately.
def sleep(self):
self.sleepAfter(1) # Can't be 0, that means "don't sleep"
# Put the printer into a low-energy state after
# the given number of seconds.
def sleepAfter(self, seconds):
if self.firmwareVersion >= 264:
self.writeBytes(27, 56, seconds & 0xFF, seconds >> 8)
else:
self.writeBytes(27, 56, seconds)
def wake(self):
self.timeoutSet(0)
self.writeBytes(255)
if self.firmwareVersion >= 264:
time.sleep(0.05) # 50 ms
self.writeBytes(27, 118, 0) # Sleep off (important!)
else:
for i in range(10):
self.writeBytes(27)
self.timeoutSet(0.1)
# Empty method, included for compatibility
# with existing code ported from Arduino.
def listen(self):
pass
# Check the status of the paper using the printers self reporting
# ability. Doesn't match the datasheet...
# Returns True for paper, False for no paper.
def hasPaper(self):
if self.firmwareVersion >= 264:
self.writeBytes(27, 118, 0)
else:
self.writeBytes(29, 114, 0)
# Bit 2 of response seems to be paper status
stat = ord(self.read(1)) & 0b00000100
# Bit set appears to mean paper out, so report paper present when it is clear
return stat == 0
def setLineHeight(self, val=32):
if val < 24: val = 24
self.lineSpacing = val - 24
# The printer doesn't take into account the current text
# height when setting line height, making this more akin
# to inter-line spacing. Default line spacing is 32
# (char height of 24, line spacing of 8).
self.writeBytes(27, 51, val)
CHARSET_USA = 0
CHARSET_FRANCE = 1
CHARSET_GERMANY = 2
CHARSET_UK = 3
CHARSET_DENMARK1 = 4
CHARSET_SWEDEN = 5
CHARSET_ITALY = 6
CHARSET_SPAIN1 = 7
CHARSET_JAPAN = 8
CHARSET_NORWAY = 9
CHARSET_DENMARK2 = 10
CHARSET_SPAIN2 = 11
CHARSET_LATINAMERICA = 12
CHARSET_KOREA = 13
CHARSET_SLOVENIA = 14
CHARSET_CROATIA = 14
CHARSET_CHINA = 15
# Alters some chars in ASCII 0x23-0x7E range; see datasheet
def setCharset(self, val=0):
if val > 15: val = 15
self.writeBytes(27, 82, val)
CODEPAGE_CP437 = 0 # USA, Standard Europe
CODEPAGE_KATAKANA = 1
CODEPAGE_CP850 = 2 # Multilingual
CODEPAGE_CP860 = 3 # Portuguese
CODEPAGE_CP863 = 4 # Canadian-French
CODEPAGE_CP865 = 5 # Nordic
CODEPAGE_WCP1251 = 6 # Cyrillic
CODEPAGE_CP866 = 7 # Cyrillic #2
CODEPAGE_MIK = 8 # Cyrillic/Bulgarian
CODEPAGE_CP755 = 9 # East Europe, Latvian 2
CODEPAGE_IRAN = 10
CODEPAGE_CP862 = 15 # Hebrew
CODEPAGE_WCP1252 = 16 # Latin 1
CODEPAGE_WCP1253 = 17 # Greek
CODEPAGE_CP852 = 18 # Latin 2
CODEPAGE_CP858 = 19 # Multilingual Latin 1 + Euro
CODEPAGE_IRAN2 = 20
CODEPAGE_LATVIAN = 21
CODEPAGE_CP864 = 22 # Arabic
CODEPAGE_ISO_8859_1 = 23 # West Europe
CODEPAGE_CP737 = 24 # Greek
CODEPAGE_WCP1257 = 25 # Baltic
CODEPAGE_THAI = 26
CODEPAGE_CP720 = 27 # Arabic
CODEPAGE_CP855 = 28
CODEPAGE_CP857 = 29 # Turkish
CODEPAGE_WCP1250 = 30 # Central Europe
CODEPAGE_CP775 = 31
CODEPAGE_WCP1254 = 32 # Turkish
CODEPAGE_WCP1255 = 33 # Hebrew
CODEPAGE_WCP1256 = 34 # Arabic
CODEPAGE_WCP1258 = 35 # Vietnam
CODEPAGE_ISO_8859_2 = 36 # Latin 2
CODEPAGE_ISO_8859_3 = 37 # Latin 3
CODEPAGE_ISO_8859_4 = 38 # Baltic
CODEPAGE_ISO_8859_5 = 39 # Cyrillic
CODEPAGE_ISO_8859_6 = 40 # Arabic
CODEPAGE_ISO_8859_7 = 41 # Greek
CODEPAGE_ISO_8859_8 = 42 # Hebrew
CODEPAGE_ISO_8859_9 = 43 # Turkish
CODEPAGE_ISO_8859_15 = 44 # Latin 9
CODEPAGE_THAI2 = 45
CODEPAGE_CP856 = 46
CODEPAGE_CP874 = 47
# Selects alt symbols for 'upper' ASCII values 0x80-0xFF
def setCodePage(self, val=0):
if val > 47: val = 47
self.writeBytes(27, 116, val)
# Copied from Arduino lib for parity; may not work on all printers
def tab(self):
self.writeBytes(9)
self.column = (self.column + 4) & 0xFC
# Copied from Arduino lib for parity; may not work on all printers
def setCharSpacing(self, spacing):
self.writeBytes(27, 32, spacing)
# Overloading print() in Python pre-3.0 is dirty pool,
# but these are here to provide more direct compatibility
# with existing code written for the Arduino library.
def print(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
# For Arduino code compatibility again
def println(self, *args, **kwargs):
for arg in args:
self.write(str(arg))
self.write('\n')
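# A hedged usage sketch, not part of the original driver: open a printer on a
# serial port, print some styled text, then put it to sleep. The port name
# "/dev/serial0", the 19200 baud rate and the receipt text are assumptions --
# adjust them for your wiring; construct with no arguments instead to stream
# raw bytes to stdout (e.g. for piping through 'lp -o raw').
if __name__ == "__main__":
    printer = Adafruit_Thermal("/dev/serial0", 19200, timeout=5)
    printer.justify('C')
    printer.boldOn()
    printer.println("RECEIPT")
    printer.boldOff()
    printer.justify('L')
    printer.println("Item .......... 2.50")
    printer.feed(3)
    # printer.printImage(Image.open("logo.png"), True)  # needs Pillow, image <= 384 px wide
    printer.sleep()  # low-power state until wake() is called again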
| 28.949081
| 72
| 0.656765
|
22e4ac6421d3449689efc69931e02d078d5b4758
| 245
|
py
|
Python
|
ddtrace/ext/kombu.py
|
vijayperiasamy-eb/dd-trace-py
|
2b0d396fc7f76582e8ffedff48933245a77ebaf2
|
[
"BSD-3-Clause"
] | 1
|
2020-03-10T01:45:56.000Z
|
2020-03-10T01:45:56.000Z
|
ddtrace/ext/kombu.py
|
vijayperiasamy-eb/dd-trace-py
|
2b0d396fc7f76582e8ffedff48933245a77ebaf2
|
[
"BSD-3-Clause"
] | null | null | null |
ddtrace/ext/kombu.py
|
vijayperiasamy-eb/dd-trace-py
|
2b0d396fc7f76582e8ffedff48933245a77ebaf2
|
[
"BSD-3-Clause"
] | null | null | null |
# type of the spans
TYPE = 'kombu'
# net extension
VHOST = 'out.vhost'
# standard tags
EXCHANGE = 'kombu.exchange'
BODY_LEN = 'kombu.body_length'
ROUTING_KEY = 'kombu.routing_key'
PUBLISH_NAME = 'kombu.publish'
RECEIVE_NAME = 'kombu.receive'
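# A hedged illustration (not the actual kombu integration) of how these tag
# names might be attached to a publish span with the ddtrace tracer. The
# exchange and routing-key values are invented for the example.
from ddtrace import tracer

def traced_publish(body):
    with tracer.trace(PUBLISH_NAME, service='kombu', span_type=TYPE) as span:
        span.set_tag(EXCHANGE, 'orders')
        span.set_tag(ROUTING_KEY, 'orders.created')
        span.set_tag(BODY_LEN, len(body))
        # the real producer.publish(body) call would happen here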
| 17.5
| 33
| 0.734694
|
833c79e41781588c05316beda10e02e743a1fce6
| 862
|
py
|
Python
|
HashDB/writer.py
|
LordGhostX/HashDB
|
960aeb6558303ff37f3ac3fb2256172271ca0ae6
|
[
"MIT"
] | 10
|
2019-08-15T15:40:52.000Z
|
2021-02-13T09:52:47.000Z
|
HashDB/writer.py
|
LordGhostX/HashDB
|
960aeb6558303ff37f3ac3fb2256172271ca0ae6
|
[
"MIT"
] | 1
|
2019-09-14T22:50:34.000Z
|
2020-07-05T22:44:15.000Z
|
HashDB/writer.py
|
LordGhostX/HashDB
|
960aeb6558303ff37f3ac3fb2256172271ca0ae6
|
[
"MIT"
] | 3
|
2019-08-16T09:47:05.000Z
|
2019-12-15T11:41:41.000Z
|
#HashDB
#Author - LordGhostX
from event_logger import event_log
from crypt import encryptdb
from json import dumps
from time import time
#Creating a new HashDB Database
#Usage: write(<name of database to create>, <data to save as json>, <password if any>, <indentation to use for JSON output>)
def write(dbname, dbcontent, password=None, prettify=True):
try:
with open(dbname + ".hashdb", "w") as db:
if password:
db.write(encryptdb(str(dbcontent), str(password), dbname))
event_log("Successfully saved DB {}".format(dbname), dbname)
else:
if prettify:
db.write(dumps(dbcontent, indent=4))
else:
db.write(dumps(dbcontent))
event_log("Successfully saved DB {}".format(dbname), dbname)
return None
except Exception as e:
event_log("There was an error editing DB {} - {}".format(dbname, str(e)), dbname)
return str(e)
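# A hedged usage sketch for write() above; the database names, sample record and
# password are invented. write() returns None on success or the error message
# string on failure, as shown in the function body.
if __name__ == "__main__":
    sample = {"users": [{"name": "ada", "admin": True}]}
    print(write("plain_db", sample))                       # -> plain_db.hashdb, pretty JSON
    print(write("secret_db", sample, password="hunter2"))  # -> encrypted via encryptdb()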
| 33.153846
| 124
| 0.709977
|
98bdca82b38af55049452ef874b9125496829e69
| 338
|
py
|
Python
|
docs/conf.py
|
NicDom/pyoverload
|
88d4421aedec55a1f43442daabf2a3a91bb5260a
|
[
"MIT"
] | 1
|
2021-10-05T20:15:15.000Z
|
2021-10-05T20:15:15.000Z
|
docs/conf.py
|
NicDom/pyoverload
|
88d4421aedec55a1f43442daabf2a3a91bb5260a
|
[
"MIT"
] | 48
|
2021-09-27T07:25:36.000Z
|
2022-03-30T10:27:01.000Z
|
docs/conf.py
|
NicDom/overloadlib
|
88d4421aedec55a1f43442daabf2a3a91bb5260a
|
[
"MIT"
] | null | null | null |
"""Sphinx configuration."""
from datetime import datetime
project = "Overloadlib"
author = "Niclas D. Gesing"
copyright = f"{datetime.now().year}, {author}"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_click",
"sphinx_rtd_theme",
]
autodoc_typehints = "description"
html_theme = "sphinx_rtd_theme"
| 22.533333
| 46
| 0.704142
|