Dataset schema (one row per source file; ⌀ = nullable):

| column | dtype | range / cardinality | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 4 – 1.02M | no |
| ext | string | 8 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4 – 209 | no |
| max_stars_repo_name | string | length 5 – 121 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | 1 – 10 items | no |
| max_stars_count | int64 | 1 – 191k | yes (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes (⌀) |
| max_issues_repo_path | string | length 4 – 209 | no |
| max_issues_repo_name | string | length 5 – 121 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | 1 – 10 items | no |
| max_issues_count | int64 | 1 – 67k | yes (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes (⌀) |
| max_forks_repo_path | string | length 4 – 209 | no |
| max_forks_repo_name | string | length 5 – 121 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | 1 – 10 items | no |
| max_forks_count | int64 | 1 – 105k | yes (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes (⌀) |
| content | string | length 4 – 1.02M | no |
| avg_line_length | float64 | 1.07 – 66.1k | no |
| max_line_length | int64 | 4 – 266k | no |
| alphanum_fraction | float64 | 0.01 – 1 | no |
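The rows below follow this schema. A minimal loading sketch, assuming the Hugging Face `datasets` library and a hypothetical local Parquet export of this dump (`python_files.parquet` is a placeholder name):

```python
# Minimal sketch, assuming the `datasets` library; the Parquet file name
# is a placeholder for wherever this dump is stored locally.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="python_files.parquet", split="train")

# Keep only small, MIT-licensed files (illustrative thresholds).
small = ds.filter(
    lambda row: row["size"] < 5_000 and "MIT" in row["max_stars_repo_licenses"]
)
print(small[0]["max_stars_repo_path"], small[0]["size"])
```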
---
hexsha: b5ec5b3c5325c6647257558f508d48973873aa8c | size: 10,505 | ext: py | lang: Python
max_stars: collector_center_sdk/model/cmdb/object_basic_info_pb2.py @ easyopsapis/easyops-api-python (adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0) | licenses: ["Apache-2.0"] | stars: 5 | star events: 2019-07-31T04:11:05.000Z → 2021-01-07T03:23:20.000Z
max_issues: same path/repo/head/licenses | issues: null | issue events: null → null
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_basic_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from collector_center_sdk.model.cmdb import object_view_pb2 as collector__center__sdk_dot_model_dot_cmdb_dot_object__view__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_basic_info.proto',
package='cmdb',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/cmdb'),
serialized_pb=_b('\n\x17object_basic_info.proto\x12\x04\x63mdb\x1a\x31\x63ollector_center_sdk/model/cmdb/object_view.proto\"\x97\x03\n\x0fObjectBasicInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08objectId\x18\x02 \x01(\t\x12\x0c\n\x04memo\x18\x03 \x01(\t\x12\x1e\n\x04view\x18\x04 \x01(\x0b\x32\x10.cmdb.ObjectView\x12\x11\n\tprotected\x18\x05 \x01(\x08\x12\x17\n\x0fwordIndexDenied\x18\x06 \x01(\x08\x12\x10\n\x08\x63\x61tegory\x18\x07 \x01(\t\x12\x0c\n\x04icon\x18\x08 \x01(\t\x12\x0e\n\x06system\x18\t \x01(\t\x12\r\n\x05\x63time\x18\n \x01(\t\x12\r\n\x05mtime\x18\x0b \x01(\t\x12\x0f\n\x07\x63reator\x18\x0c \x01(\t\x12\x10\n\x08modifier\x18\r \x01(\t\x12\x0b\n\x03_ts\x18\x0e \x01(\x05\x12\x10\n\x08_version\x18\x0f \x01(\x05\x12\x19\n\x11updateAuthorizers\x18\x10 \x03(\t\x12\x19\n\x11\x64\x65leteAuthorizers\x18\x11 \x03(\t\x12\x12\n\nisAbstract\x18\x12 \x01(\x08\x12\x16\n\x0eparentObjectId\x18\x13 \x01(\t\x12\x18\n\x10permissionDenied\x18\x14 \x01(\x08\x42@Z>go.easyops.local/contracts/protorepo-models/easyops/model/cmdbb\x06proto3')
,
dependencies=[collector__center__sdk_dot_model_dot_cmdb_dot_object__view__pb2.DESCRIPTOR,])
_OBJECTBASICINFO = _descriptor.Descriptor(
name='ObjectBasicInfo',
full_name='cmdb.ObjectBasicInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='cmdb.ObjectBasicInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='cmdb.ObjectBasicInfo.objectId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='cmdb.ObjectBasicInfo.memo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='view', full_name='cmdb.ObjectBasicInfo.view', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protected', full_name='cmdb.ObjectBasicInfo.protected', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wordIndexDenied', full_name='cmdb.ObjectBasicInfo.wordIndexDenied', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='cmdb.ObjectBasicInfo.category', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='icon', full_name='cmdb.ObjectBasicInfo.icon', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='system', full_name='cmdb.ObjectBasicInfo.system', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='cmdb.ObjectBasicInfo.ctime', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mtime', full_name='cmdb.ObjectBasicInfo.mtime', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='cmdb.ObjectBasicInfo.creator', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='modifier', full_name='cmdb.ObjectBasicInfo.modifier', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='_ts', full_name='cmdb.ObjectBasicInfo._ts', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='_version', full_name='cmdb.ObjectBasicInfo._version', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateAuthorizers', full_name='cmdb.ObjectBasicInfo.updateAuthorizers', index=15,
number=16, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deleteAuthorizers', full_name='cmdb.ObjectBasicInfo.deleteAuthorizers', index=16,
number=17, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isAbstract', full_name='cmdb.ObjectBasicInfo.isAbstract', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parentObjectId', full_name='cmdb.ObjectBasicInfo.parentObjectId', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='permissionDenied', full_name='cmdb.ObjectBasicInfo.permissionDenied', index=19,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=85,
serialized_end=492,
)
_OBJECTBASICINFO.fields_by_name['view'].message_type = collector__center__sdk_dot_model_dot_cmdb_dot_object__view__pb2._OBJECTVIEW
DESCRIPTOR.message_types_by_name['ObjectBasicInfo'] = _OBJECTBASICINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ObjectBasicInfo = _reflection.GeneratedProtocolMessageType('ObjectBasicInfo', (_message.Message,), {
'DESCRIPTOR' : _OBJECTBASICINFO,
'__module__' : 'object_basic_info_pb2'
# @@protoc_insertion_point(class_scope:cmdb.ObjectBasicInfo)
})
_sym_db.RegisterMessage(ObjectBasicInfo)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
avg_line_length: 50.504808 | max_line_length: 1,051 | alphanum_fraction: 0.741456
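A short usage sketch for the generated `ObjectBasicInfo` message above; it assumes the `collector_center_sdk` package and the `protobuf` runtime are importable, and uses only the standard generated-message API:

```python
from collector_center_sdk.model.cmdb.object_basic_info_pb2 import ObjectBasicInfo

info = ObjectBasicInfo(name="Host", objectId="HOST", category="hardware")
info.updateAuthorizers.append("admin")  # repeated string field (number 16)

payload = info.SerializeToString()      # serialize to wire-format bytes
decoded = ObjectBasicInfo.FromString(payload)
assert decoded.objectId == "HOST"
```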
---
hexsha: f85127ede3f6ee28b3566ad6faef9e6312b99853 | size: 2,010 | ext: py | lang: Python
max_stars: flexget/plugins/sites/eztv.py @ tarzasai/Flexget (e5822874b2ee088b508390ff02c4eda9785596bc) | licenses: ["MIT"] | stars: 1 | star events: 2018-05-02T21:14:50.000Z → 2018-05-02T21:14:50.000Z
max_issues: same path/repo/head/licenses | issues: null | issue events: null → null
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse, urlunparse
import re
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
log = logging.getLogger('eztv')
EZTV_MIRRORS = [
('http', 'eztv.ch'),
('https', 'eztv-proxy.net'),
('http', 'eztv.come.in')]
class UrlRewriteEztv(object):
"""Eztv url rewriter."""
def url_rewritable(self, task, entry):
return urlparse(entry['url']).netloc == 'eztv.ch'
def url_rewrite(self, task, entry):
url = entry['url']
page = None
for (scheme, netloc) in EZTV_MIRRORS:
try:
_, _, path, params, query, fragment = urlparse(url)
url = urlunparse((scheme, netloc, path, params, query, fragment))
page = task.requests.get(url).content
except RequestException as e:
                log.debug('Eztv mirror `%s` seems to be down: %s', url, e)
continue
break
if not page:
raise UrlRewritingError('No mirrors found for url %s' % entry['url'])
log.debug('Eztv mirror `%s` chosen', url)
try:
soup = get_soup(page)
mirrors = soup.find_all('a', attrs={'class': re.compile(r'download_\d')})
except Exception as e:
raise UrlRewritingError(e)
log.debug('%d torrent mirrors found', len(mirrors))
if not mirrors:
raise UrlRewritingError('Unable to locate download link from url %s' % url)
entry['urls'] = [m.get('href') for m in mirrors]
entry['url'] = mirrors[0].get('href')
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteEztv, 'eztv', groups=['urlrewriter'], api_ver=2)
avg_line_length: 31.40625 | max_line_length: 87 | alphanum_fraction: 0.629851
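The core of the plugin above is a mirror-failover loop; a standalone sketch of that pattern with plain `requests` (mirror hosts and the timeout are illustrative):

```python
from urllib.parse import urlparse, urlunparse
import requests

MIRRORS = [('https', 'mirror-a.example.net'), ('http', 'mirror-b.example.org')]

def fetch_from_mirrors(url):
    _, _, path, params, query, fragment = urlparse(url)
    for scheme, netloc in MIRRORS:
        # Rewrite only scheme and host, keeping the rest of the URL intact.
        candidate = urlunparse((scheme, netloc, path, params, query, fragment))
        try:
            return requests.get(candidate, timeout=10).content
        except requests.RequestException:
            continue  # this mirror is down; try the next one
    raise RuntimeError('no working mirror found for %s' % url)
```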
---
hexsha: f4a2dfb67a560cfc4f6d08ba7ec4ab8fdddaafb3 | size: 828 | ext: py | lang: Python
max_stars: setup.py @ neonnnnn/pyrfm (e88fe8cb7bf3062616d33826e955e828fc6d8ba6) | licenses: ["BSD-2-Clause"] | stars: 7 | star events: 2020-05-31T01:47:27.000Z → 2021-12-26T03:45:14.000Z
max_issues: same path/repo/head/licenses | issues: 2 | issue events: 2019-12-01T01:18:38.000Z → 2020-08-27T12:07:26.000Z
max_forks: same path/repo/head/licenses | forks: 3 | fork events: 2021-03-17T13:46:56.000Z → 2022-03-18T21:43:45.000Z
content:
from __future__ import print_function
import os.path
import sys
import setuptools
from numpy.distutils.core import setup
DISTNAME = 'pyrfm'
DESCRIPTION = 'A python library for random feature maps.'
VERSION = '1.0.0'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.add_subpackage('pyrfm')
return config
if __name__ == '__main__':
old_path = os.getcwd()
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
os.chdir(local_path)
sys.path.insert(0, local_path)
setup(configuration=configuration,
name=DISTNAME,
maintainer='Kyohei Atarashi',
include_package_data=True,
version=VERSION,
zip_safe=False
)
avg_line_length: 23.657143 | max_line_length: 62 | alphanum_fraction: 0.699275
---
hexsha: a1b9baadc91d848aad06a02f583218ba7cb78fd3 | size: 4,462 | ext: py | lang: Python
max_stars: ui/flask_ui/app.py @ Smithsonian/Mass-Georeferencing (bb7d81cd82684900003d3049764cd2d243325248) | licenses: ["Apache-2.0"] | stars: 5 | star events: 2020-06-24T16:12:48.000Z → 2021-11-08T09:46:02.000Z
max_issues: same path/repo/head/licenses | issues: 8 | issue events: 2020-07-06T21:11:58.000Z → 2020-07-22T13:10:48.000Z
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
#!flask/bin/python
#
# Flask test app
#
from flask import Flask, jsonify, request
from flask import Response
from flask import render_template
import simplejson as json
import logging
import locale
app_ver = "0.1"
# logging.basicConfig(stream=sys.stderr)
logging.basicConfig(filename='app.log',
level=logging.DEBUG,
filemode='a',
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%y-%m-%d %H:%M:%S'
)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Set locale for number format
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
app = Flask(__name__)
# From http://flask.pocoo.org/docs/1.0/patterns/apierrors/
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['error'] = self.message
return rv
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.errorhandler(404)
def page_not_found(e):
logging.error(e)
data = json.dumps({'error': "route not found"})
return Response(data, mimetype='application/json'), 404
@app.errorhandler(500)
def internal_server_error(e):
logging.error(e)
data = json.dumps({'error': "system error"})
return Response(data, mimetype='application/json'), 500
@app.route('/results', methods=['GET', 'POST'])
def get_resultsfile():
# Instead of hard coding the json data, read from file
file = request.values.get('file')
images_path = "static/data/"
from pathlib import Path
file_stem = Path(file).stem
json_file = "{}/{}.json".format(images_path, file_stem)
# print(file_stem)
print(json_file)
with open(json_file) as jsonfile:
p = json.load(jsonfile)
print(p)
from PIL import Image
im = Image.open("{}/{}.jpg".format(images_path, file_stem))
# width to display
image_width = 960
image_height = (image_width / im.size[0]) * im.size[1]
data = []
    for obj in p["localized_object_annotations"]:
        x = obj["bounding_poly"]["normalized_vertices"][0]["x"] * image_width
        y = obj["bounding_poly"]["normalized_vertices"][0]["y"] * image_height
        x_1 = obj["bounding_poly"]["normalized_vertices"][1]["x"] * image_width
        y_1 = obj["bounding_poly"]["normalized_vertices"][1]["y"] * image_height
        x_2 = obj["bounding_poly"]["normalized_vertices"][2]["x"] * image_width
        y_2 = obj["bounding_poly"]["normalized_vertices"][2]["y"] * image_height
        x_3 = obj["bounding_poly"]["normalized_vertices"][3]["x"] * image_width
        y_3 = obj["bounding_poly"]["normalized_vertices"][3]["y"] * image_height
        score = obj["score"]
if (score >= 0.9):
border_color = "green"
elif (0.8 <= score < 0.9):
border_color = "yellow"
else:
border_color = "red"
object_data = {
'x': round(x),
'y': round(y),
'x_1': round(x_1),
'y_1': round(y_1),
'x_2': round(x_2),
'y_2': round(y_2),
'x_3': round(x_3),
'y_3': round(y_3),
            'name': obj["name"],
            'score': obj["score"],
'margin_top': y_1,
'margin_left': x,
'border_width': x_1 - x,
'border_height': y_2 - y_1,
'border_color': border_color
}
data.append(object_data)
return render_template('results.html', file=file, data=data, image_width=image_width, image_height=image_height)
@app.route('/', methods=['GET', 'POST'])
def get_list():
return render_template('index.html')
if __name__ == '__main__':
app.run()
avg_line_length: 29.355263 | max_line_length: 116 | alphanum_fraction: 0.621022
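With the app above running locally on Flask's default port, the `/results` route can be exercised from a client; the file name below is hypothetical and must exist as a JSON/JPEG pair under `static/data/`:

```python
import requests

resp = requests.get(
    "http://127.0.0.1:5000/results",
    params={"file": "specimen_001.jpg"},  # hypothetical image name
)
print(resp.status_code)  # 200 if the JSON/JPEG pair was found
print(resp.text[:200])   # start of the rendered results.html page
```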
---
hexsha: 9fce93a38fbe46e5e48b88c77f26cf8b35d8447c | size: 4,676 | ext: py | lang: Python
max_stars: pipeline/benchmark.py @ Atrus619/DeckOfCards (bf0668ea26041e7faab2b88a03d42ba6887d054a) | licenses: ["MIT"] | stars: 1 | star events: 2019-06-27T12:14:38.000Z → 2019-06-27T12:14:38.000Z
max_issues: same path/repo/head/licenses | issues: 18 | issue events: 2019-07-14T17:40:22.000Z → 2019-11-11T01:54:07.000Z
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
from config import Config as cfg
from pinochle.Game import Game
from util import db
from classes.Agent import Agent
from classes.Epsilon import Epsilon
import logging
import util.util as util
from classes.Human import Human
from util.Constants import Constants as cs
import matplotlib.pyplot as plt
from collections import OrderedDict
import time
import pipeline.util as pu
logging.basicConfig(format='%(levelname)s:%(message)s', level=cfg.logging_level)
def benchmark_test(primary_model, benchmark_model, num_games, benchmark_bot_name='benchmark_bot', run_id=None):
epsilon = Epsilon()
player_1 = Agent(name=cfg.bot_1_name, model=primary_model, epsilon=epsilon)
player_2 = Agent(name=benchmark_bot_name, model=benchmark_model, epsilon=epsilon)
if 'policy_net' in dir(player_1.model):
player_1.model.policy_net.eval()
if 'policy_net' in dir(player_2.model):
player_2.model.policy_net.eval()
game_output = []
for j in range(num_games):
player_list = [player_1, player_2]
game = Game(name="pinochle", players=player_list, run_id=run_id, current_cycle=None)
game.deal()
game_output.append(game.play())
winner_list, exp_df = pu.parse_game_output(game_output=game_output)
if run_id is not None: # Store history
db.upload_exp(df=exp_df)
return 1 - sum(winner_list) / len(winner_list)
def human_test(model):
epsilon = Epsilon()
player_1 = Agent(name=cfg.bot_1_name, model=model, epsilon=epsilon)
player_2 = Human("YOU")
if 'policy_net' in dir(model):
model.policy_net.eval()
# Set logging level to debug
logging.getLogger().setLevel(logging.DEBUG)
logging.info("Human test enabled, initializing AI uprising...")
# Initialize game
player_list = [player_1, player_2]
game = Game(name="pinochle", players=player_list, run_id=None, current_cycle=None, human_test=True)
game.deal()
game.play()
# Set logging level back to config
logging.getLogger().setLevel(cfg.logging_level)
def get_average_reward(run_id, previous_experience_id, agent_id, opponent_id):
df = db.get_rewards_by_id(run_id=run_id, previous_experience_id=previous_experience_id, agent_id=agent_id, opponent_id=opponent_id)
logging.debug(cfg.benchmark_freq * cfg.episodes_per_cycle)
logging.debug(df.sum())
average = df.sum() / (cfg.benchmark_freq * cfg.episodes_per_cycle)
logging.info("Average reward since last benchmark from self-play: " + str(round(average.reward, 2)))
return average.reward
def round_robin(model_list, num_games, verbose=True, plot=True, device='cuda:0'):
start_time = time.time()
    epsilon = Epsilon()
model_wins = OrderedDict()
for i, model in enumerate(model_list):
model_wins[f'Player {i}'] = [0, model]
if 'device' in dir(model) and model.device != device:
model.policy_net = model.policy_net.to(device)
for i, p1_model in enumerate(model_list):
for j, p2_model in enumerate(model_list):
if i < j:
round_start_time = time.time()
p1 = Agent(name=f'Player {i}', model=p1_model, epsilon=epsilon)
p2 = Agent(name=f'Player {j}', model=p2_model, epsilon=epsilon)
if verbose:
print(f'Player {i} vs. Player {j}...')
p1_wins = int(benchmark_test(primary_model=p1_model, benchmark_model=p2_model, num_games=num_games) * num_games)
p2_wins = int(num_games - p1_wins)
if verbose:
print(f'Player {i}: {p1_wins}\tPlayer {j}: {p2_wins}\tDuration: {util.get_pretty_time(time.time() - round_start_time)}')
print(cs.DIVIDER)
model_wins[p1.name][0] += p1_wins
model_wins[p2.name][0] += p2_wins
output = sorted(model_wins.items(), key=lambda kv: kv[1][0], reverse=True)
if verbose:
for i, model in enumerate(output):
print(f'Rank {i+1}: {model[0]} with {model[1][0]} wins')
total_games = len(model_list) / 2 * (len(model_list) - 1) * num_games
total_duration = time.time() - start_time
avg_time_per_game = total_duration / total_games
print(f'{total_games} total games played over {util.get_pretty_time(total_duration)} ({util.get_pretty_time(avg_time_per_game)} per game)')
if plot:
xs = [x[0] for x in model_wins.items()]
heights = [x[1][0] for x in model_wins.items()]
plt.bar(height=heights, x=xs)
plt.title('Round Robin Tournament Results')
plt.xlabel('Model')
plt.ylabel('Total Number of Wins')
return output
avg_line_length: 36.818898 | max_line_length: 147 | alphanum_fraction: 0.673225
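A hypothetical driver for `round_robin` above; `load_model` and the checkpoint names are placeholders for however this repo restores model checkpoints:

```python
# Hypothetical: load_model and the checkpoint names are placeholders.
models = [load_model(name) for name in ("ckpt_a.pt", "ckpt_b.pt", "ckpt_c.pt")]

standings = round_robin(model_list=models, num_games=100,
                        verbose=True, plot=False, device='cuda:0')
print("Winner:", standings[0][0])  # entries are (name, [wins, model]) pairs
```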
---
hexsha: 1e71964b744d46f34bb0ed3f9b9c9cae6fdb2a9a | size: 3,092 | ext: py | lang: Python
max_stars: connectionManager.py @ DanijelMi/ChuckTesta (0fbee58691f2b72b3400af4f993c8956dffe6762) | licenses: ["MIT"] | stars: null | star events: null → null
max_issues: same path/repo/head/licenses | issues: null | issue events: null → null
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
import network # For WiFI connection
from robust import MQTTClient
from ubinascii import hexlify
from machine import unique_id
# Change the file names from where to read configuration parameters
AP_FILENAME = "ap.txt"
WIFI_FILENAME = "wifi.txt"
MQTT_FILENAME = "mqtt.txt"
info = None
ID = hexlify(unique_id())
AP = network.WLAN(network.AP_IF)
wlan = network.WLAN(network.STA_IF)
mqttActive = False
def readFile(filename):
    with open(filename, 'r') as f:
        text = f.read().split('\r\n')
    info = {}
    for i in text:
        if len(i) > 0 and i[0] != '#':
            info[i.split('=')[0]] = i.split('=')[1]
    return info
# Reads configuration parameteres from a text file and sets the access point accordingly.
# Bool arg, to turn off or on.
def setAP(state):
AP.active(False)
if state:
try:
AP.active(True)
info = readFile(AP_FILENAME)
AP.config(essid=info['ssid'], channel=int(info['channel']), hidden=int(
info['hidden']), authmode=int(info['authmode']), password=info['password'])
except OSError:
print("AP config file does not exist")
def setWifi(state):
wlan.disconnect()
wlan.active(False)
if state:
try:
info = readFile(WIFI_FILENAME)
wlan.active(True)
if info['StaticIP'] == "DHCP":
wlan.ifconfig((wlan.ifconfig()[0], wlan.ifconfig()[1],
wlan.ifconfig()[2], wlan.ifconfig()[3]))
else:
wlan.ifconfig((info['StaticIP'], wlan.ifconfig()[1],
wlan.ifconfig()[2], wlan.ifconfig()[3]))
wlan.connect(info['SSID'], info['NetworkKey'])
except OSError:
print("Wifi config file does not exist")
# Reads configuration parameteres from a text file and sets the MQTT connection accordingly
# Set to false to turn mqtt off, or for ex. put in "mqtt.txt" to turn it on
def setMQTT(callback):
global info
try:
info = readFile(MQTT_FILENAME)
client = MQTTClient(client_id=ID, server=info['host'],
user=info['username'], password=info['password'], port=int(info['port']))
client.set_last_will(topic=info['topic'], msg=info['lastWillMsg'])
# Print diagnostic messages when retries/reconnects happens
client.DEBUG = (info['consoleVerbose'] == "True")
client.set_callback(callback)
return client
except OSError:
print("MQTT config file does not exist")
def connMQTT(obj, state):
global mqttActive
if not state:
if mqttActive:
obj.disconnect()
mqttActive = False
return False
elif state:
try:
# info = readFile(MQTT_FILENAME)
obj.connect()
obj.subscribe(info['topic'])
mqttActive = True
return True
except OSError:
mqttActive = False
return False
avg_line_length: 32.208333 | max_line_length: 102 | alphanum_fraction: 0.573739
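All three config files use the same `key=value` format parsed by `readFile` above (CRLF line endings, `#` for comments). A hypothetical `wifi.txt` with illustrative values, using the keys `setWifi` actually reads:

```
# wifi.txt - illustrative values only
SSID=MyHomeNetwork
NetworkKey=supersecret
StaticIP=DHCP
```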
---
hexsha: f534fb1b0f1ee9f55bd8cdf65d71c31dd0e0dc93 | size: 40,948 | ext: py | lang: Python
max_stars: restapi/flask_ext/flask_irods/client.py @ beetleman/http-api (6839e57543cf492e1f281ca9f5604c0058a17f81) | licenses: ["MIT"] | stars: null | star events: null → null
max_issues: same path/repo/head/licenses | issues: null | issue events: null → null
max_forks: same path/repo/head/licenses | forks: null | fork events: null → null
content:
# -*- coding: utf-8 -*-
import os
from functools import lru_cache
from flask import request, stream_with_context, Response
from utilities import htmlcodes as hcodes
from irods.access import iRODSAccess
from irods.rule import Rule
from irods.ticket import Ticket
from irods.models import User, UserGroup, UserAuth
from irods import exception as iexceptions
from restapi.exceptions import RestApiException
from utilities.logs import get_logger
log = get_logger(__name__)
class IrodsException(RestApiException):
pass
class IrodsPythonClient():
anonymous_user = 'anonymous'
def __init__(self, prc, variables, default_chunk_size=1048576):
self.prc = prc
self.variables = variables
self.chunk_size = self.variables.get('chunksize', default_chunk_size)
def connect(self):
return self
def get_collection_from_path(self, absolute_path):
return os.path.dirname(absolute_path)
def get_absolute_path(self, *args, root=None):
if len(args) < 1:
return root
if root is None and not args[0].startswith('/'):
root = '/'
return os.path.join(root, *args)
# ##################################
# ##################################
# Re-implemented wrappers
# ##################################
# ##################################
def exists(self, path):
if self.is_collection(path):
return True
if self.is_dataobject(path):
return True
return False
def is_collection(self, path):
return self.prc.collections.exists(path)
def is_dataobject(self, path):
try:
self.prc.data_objects.get(path)
return True
except iexceptions.CollectionDoesNotExist:
return False
except iexceptions.DataObjectDoesNotExist:
return False
def get_dataobject(self, path):
try:
obj = self.prc.data_objects.get(path)
return obj
except (
iexceptions.CollectionDoesNotExist,
iexceptions.DataObjectDoesNotExist
):
raise IrodsException("%s not found or no permissions" % path)
def getPath(self, path, prefix=None):
if prefix is None:
length = 0
else:
length = len(prefix)
if length > 0:
path = path[length:]
if path[0] == "/":
path = path[1:]
return os.path.dirname(path)
def list(self, path=None, recursive=False, detailed=False,
acl=False, removePrefix=None):
""" List the files inside an iRODS path/collection """
if path is None:
path = self.get_user_home()
if self.is_dataobject(path):
raise IrodsException(
"Cannot list a Data Object; you may get it instead.")
try:
data = {}
root = self.prc.collections.get(path)
for coll in root.subcollections:
row = {}
key = coll.name
row["PID"] = None
row["name"] = coll.name
row["objects"] = {}
if recursive:
row["objects"] = self.list(
path=coll.path,
recursive=recursive,
detailed=detailed,
acl=acl,
removePrefix=removePrefix
)
row["path"] = self.getPath(coll.path, removePrefix)
row["object_type"] = "collection"
if detailed:
row["owner"] = "-"
if acl:
acl = self.get_permissions(coll)
row["acl"] = acl["ACL"]
row["acl_inheritance"] = acl["inheritance"]
data[key] = row
for obj in root.data_objects:
row = {}
key = obj.name
row["name"] = obj.name
row["path"] = self.getPath(obj.path, removePrefix)
row["object_type"] = "dataobject"
row["PID"] = None
row["checksum"] = None
if detailed:
row["owner"] = obj.owner_name
row["content_length"] = obj.size
row["created"] = obj.create_time
row["last_modified"] = obj.modify_time
if acl:
acl = self.get_permissions(obj)
row["acl"] = acl["ACL"]
row["acl_inheritance"] = acl["inheritance"]
data[key] = row
return data
except iexceptions.CollectionDoesNotExist:
raise IrodsException("Not found (or no permission): %s" % path)
# replicas = []
# for line in lines:
# replicas.append(re.split("\s+", line.strip()))
# return replicas
def create_empty(self, path, directory=False, ignore_existing=False):
if directory:
return self.create_directory(path, ignore_existing)
else:
return self.create_file(path, ignore_existing)
def create_directory(self, path, ignore_existing=False):
# print("TEST", path, ignore_existing)
try:
ret = self.prc.collections.create(
path, recurse=ignore_existing)
log.debug("Created irods collection: %s", path)
return ret
except iexceptions.CAT_UNKNOWN_COLLECTION:
raise IrodsException("Unable to create collection, invalid path")
except iexceptions.CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME:
if not ignore_existing:
raise IrodsException(
"Irods collection already exists",
status_code=hcodes.HTTP_BAD_REQUEST)
else:
log.debug("Irods collection already exists: %s", path)
except (
iexceptions.CAT_NO_ACCESS_PERMISSION,
iexceptions.SYS_NO_API_PRIV
):
raise IrodsException("You have no permissions on path %s" % path)
return None
def create_file(self, path, ignore_existing=False):
try:
ret = self.prc.data_objects.create(path)
log.debug("Create irods object: %s", path)
return ret
except iexceptions.CAT_NO_ACCESS_PERMISSION:
raise IrodsException("CAT_NO_ACCESS_PERMISSION")
except iexceptions.SYS_INTERNAL_NULL_INPUT_ERR:
raise IrodsException("Unable to create object, invalid path")
except iexceptions.OVERWRITE_WITHOUT_FORCE_FLAG:
if not ignore_existing:
raise IrodsException(
"Irods object already exists",
status_code=hcodes.HTTP_BAD_REQUEST)
log.debug("Irods object already exists: %s", path)
return False
def icopy(self, sourcepath, destpath, ignore_existing=False, warning=None):
# Replace 'copy'
from irods.manager.data_object_manager import DataObjectManager
dm = DataObjectManager(self.prc)
if warning is None:
warning = 'Irods object already exists'
try:
dm.copy(sourcepath, destpath)
except iexceptions.OVERWRITE_WITHOUT_FORCE_FLAG:
if not ignore_existing:
raise IrodsException(
"Irods object already exists",
status_code=hcodes.HTTP_BAD_REQUEST)
log.warning("%s: %s", warning, destpath)
else:
log.debug("Copied: %s -> %s", sourcepath, destpath)
def put(self, local_path, irods_path):
# NOTE: this action always overwrite
return self.prc.data_objects.put(local_path, irods_path)
def copy(self, sourcepath, destpath,
recursive=False, force=False,
compute_checksum=False, compute_and_verify_checksum=False):
if recursive:
log.error("Recursive flag not implemented for copy")
if self.is_collection(sourcepath):
raise IrodsException("Copy directory not supported")
if compute_checksum:
raise IrodsException(
"Compute_checksum not supported in copy")
if compute_and_verify_checksum:
raise IrodsException(
"Compute_and_verify_checksum not supported in copy")
if sourcepath == destpath:
raise IrodsException(
"Source and destination path are the same")
try:
log.verbose("Copy %s into %s" % (sourcepath, destpath))
source = self.prc.data_objects.get(sourcepath)
self.create_empty(
destpath, directory=False, ignore_existing=force)
target = self.prc.data_objects.get(destpath)
with source.open('r+') as f:
with target.open('w') as t:
for line in f:
# if t.writable():
t.write(line)
except iexceptions.DataObjectDoesNotExist:
raise IrodsException(
"DataObject not found (or no permission): %s" % sourcepath)
except iexceptions.CollectionDoesNotExist:
raise IrodsException(
"Collection not found (or no permission): %s" % sourcepath)
def move(self, src_path, dest_path):
try:
if self.is_collection(src_path):
self.prc.collections.move(src_path, dest_path)
log.debug("Renamed collection: %s->%s", src_path, dest_path)
else:
self.prc.data_objects.move(src_path, dest_path)
log.debug("Renamed irods object: %s->%s", src_path, dest_path)
except iexceptions.CAT_RECURSIVE_MOVE:
raise IrodsException("Source and destination path are the same")
except iexceptions.SAME_SRC_DEST_PATHS_ERR:
raise IrodsException("Source and destination path are the same")
except iexceptions.CAT_NO_ROWS_FOUND:
raise IrodsException("Invalid source or destination")
except iexceptions.CAT_NAME_EXISTS_AS_DATAOBJ:
# raised from both collection and data objects?
raise IrodsException("Destination path already exists")
except BaseException as e:
log.error("%s(%s)", e.__class__.__name__, e)
raise IrodsException("System error; failed to move.")
def remove(self, path, recursive=False, force=False, resource=None):
try:
if self.is_collection(path):
self.prc.collections.remove(
path, recurse=recursive, force=force)
log.debug("Removed irods collection: %s", path)
else:
self.prc.data_objects.unlink(path, force=force)
log.debug("Removed irods object: %s", path)
except iexceptions.CAT_COLLECTION_NOT_EMPTY:
if recursive:
raise IrodsException(
"Error deleting non empty directory")
else:
raise IrodsException(
"Cannot delete non empty directory without recursive flag")
except iexceptions.CAT_NO_ROWS_FOUND:
raise IrodsException("Irods delete error: path not found")
# FIXME: remove resource
# if resource is not None:
# com = 'itrim'
# args = ['-S', resource]
# Try with:
# self.prc.resources.remove(name, test=dryRunTrueOrFalse)
def write_file_content(self, path, content, position=0):
try:
obj = self.prc.data_objects.get(path)
with obj.open('w+') as handle:
if position > 0 and handle.seekable():
handle.seek(position)
if handle.writable():
# handle.write('foo\nbar\n')
a_buffer = bytearray()
a_buffer.extend(map(ord, content))
handle.write(a_buffer)
handle.close()
except iexceptions.DataObjectDoesNotExist:
raise IrodsException("Cannot write to file: not found")
def get_file_content(self, path):
try:
data = []
obj = self.prc.data_objects.get(path)
with obj.open('r+') as handle:
if handle.readable():
for line in handle:
s = line.decode("utf-8")
data.append(s)
return data
except iexceptions.DataObjectDoesNotExist:
raise IrodsException("Cannot read file: not found")
def open(self, absolute_path, destination):
try:
obj = self.prc.data_objects.get(absolute_path)
# TODO: could use io package?
with obj.open('r') as handle:
with open(destination, "wb") as target:
for line in handle:
target.write(line)
return True
except iexceptions.DataObjectDoesNotExist:
raise IrodsException("Cannot read file: not found")
return False
def read_in_chunks(self, file_object, chunk_size=None):
"""
Lazy function (generator) to read a file piece by piece.
        Default chunk size: self.chunk_size (1 MiB unless configured).
"""
if chunk_size is None:
chunk_size = self.chunk_size
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def write_in_chunks(self, target, chunk_size=None):
if chunk_size is None:
chunk_size = self.chunk_size
while True:
chunk = request.stream.read(chunk_size)
# print("\n\n\nCONTENT", chunk)
if not chunk:
break
target.write(chunk)
def read_in_streaming(self, absolute_path, headers=None):
"""
Reads obj from iRODS without saving a local copy
"""
log.info("Downloading file %s in streaming with chunk size %s",
absolute_path, self.chunk_size)
try:
obj = self.prc.data_objects.get(absolute_path)
# NOTE: what about binary option?
handle = obj.open('r')
if headers is None:
headers = {}
return Response(
stream_with_context(
self.read_in_chunks(handle, self.chunk_size)),
headers=headers,
)
except iexceptions.DataObjectDoesNotExist:
raise IrodsException("Cannot read file: not found")
def write_in_streaming(self,
destination, force=False,
resource=None, binary=False):
"""
Writes obj to iRODS without saving a local copy
"""
# FIXME: resource is currently not used!
# log.warning("Resource not used in saving irods data...")
if not force and self.is_dataobject(destination):
            log.warning("Already exists")
raise IrodsException(
"File '" + destination + "' already exists. " +
"Change file name or use the force parameter")
log.info("Uploading file in streaming to %s with chunk size %s",
destination, self.chunk_size)
try:
self.create_empty(
destination, directory=False, ignore_existing=force)
obj = self.prc.data_objects.get(destination)
# Based on:
# https://blog.pelicandd.com/article/80/streaming-input-and-output-in-flask
# https://github.com/pallets/flask/issues/2086#issuecomment-261962321
try:
# NOTE binary option for non ASCII files
mode = 'w'
if binary:
mode = 'w+'
with obj.open(mode) as target:
self.write_in_chunks(target, self.chunk_size)
except BaseException as ex:
log.critical("Failed streaming upload: %s", ex)
# Should I remove file from iRODS if upload failed?
log.debug("Removing object from irods")
self.remove(destination, force=True)
raise ex
return True
except iexceptions.CollectionDoesNotExist:
log.critical("Failed streaming upload: collection not found")
raise IrodsException("Cannot write to file: path not found")
# except iexceptions.DataObjectDoesNotExist:
# raise IrodsException("Cannot write to file: not found")
except BaseException as ex:
log.critical("Failed streaming upload: %s", ex)
raise ex
return False
def save(self,
path, destination, force=False, resource=None, chunk_size=None):
if chunk_size is None:
chunk_size = self.chunk_size
# FIXME: resource is not used!
# log.warning("Resource not used in saving irods data...")
try:
with open(path, "rb") as handle:
self.create_empty(
destination, directory=False, ignore_existing=force)
obj = self.prc.data_objects.get(destination)
try:
with obj.open('w') as target:
# for line in handle:
# target.write(line)
while True:
piece = handle.read(chunk_size)
if not piece:
break
# if len(piece) > 0:
target.write(piece)
except BaseException as e:
self.remove(destination, force=True)
raise e
return True
except iexceptions.CollectionDoesNotExist:
raise IrodsException("Cannot write to file: path not found")
# except iexceptions.DataObjectDoesNotExist:
# raise IrodsException("Cannot write to file: not found")
return False
############################################
# ############ ACL Management ##############
############################################
def get_permissions(self, coll_or_obj):
if type(coll_or_obj) is str:
if self.is_collection(coll_or_obj):
coll_or_obj = self.prc.collections.get(coll_or_obj)
elif self.is_dataobject(coll_or_obj):
coll_or_obj = self.prc.data_objects.get(coll_or_obj)
else:
coll_or_obj = None
if coll_or_obj is None:
raise IrodsException("Cannot get permission of a null object")
data = {}
data["path"] = coll_or_obj.path
data["ACL"] = []
acl_list = self.prc.permissions.get(coll_or_obj)
for acl in acl_list:
data["ACL"].append([
acl.user_name,
acl.user_zone,
acl.access_name
])
# FIXME: how to retrieve inheritance?
data["inheritance"] = "N/A"
return data
def enable_inheritance(self, path, zone=None):
if zone is None:
zone = self.get_current_zone()
key = 'inherit'
ACL = iRODSAccess(access_name=key, path=path, user_zone=zone)
try:
self.prc.permissions.set(ACL) # , recursive=False)
log.verbose("Enabled %s to %s", key, path)
except iexceptions.CAT_INVALID_ARGUMENT:
if not self.is_collection(path) and not self.is_dataobject(path):
raise IrodsException("Cannot set Inherit: path not found")
else:
raise IrodsException("Cannot set Inherit")
return False
else:
return True
def create_collection_inheritable(self, ipath, user, permissions='own'):
# Create the directory
self.create_empty(ipath, directory=True, ignore_existing=True)
# This user will own the directory
self.set_permissions(
ipath, permission=permissions, userOrGroup=user)
# Let the permissions scale to subelements
self.enable_inheritance(ipath)
def set_permissions(self, path, permission=None, userOrGroup=None,
zone=None, recursive=False):
if zone is None:
zone = self.get_current_zone()
# If not specified, remove permission
if permission is None:
permission = 'null'
try:
ACL = iRODSAccess(
access_name=permission,
path=path,
user_name=userOrGroup,
user_zone=zone)
self.prc.permissions.set(ACL, recursive=recursive)
log.debug("Grant %s=%s to %s", userOrGroup, permission, path)
return True
except iexceptions.CAT_INVALID_USER:
raise IrodsException("Cannot set ACL: user or group not found")
except iexceptions.CAT_INVALID_ARGUMENT:
if not self.is_collection(path) and not self.is_dataobject(path):
raise IrodsException("Cannot set ACL: path not found")
else:
raise IrodsException("Cannot set ACL")
return False
def set_inheritance(self, path, inheritance=True, recursive=False):
try:
if inheritance:
permission = "inherit"
else:
permission = "noinherit"
ACL = iRODSAccess(
access_name=permission,
path=path,
user_name='',
user_zone='')
self.prc.permissions.set(ACL, recursive=recursive)
log.debug("Set inheritance %r to %s", inheritance, path)
return True
except iexceptions.CAT_NO_ACCESS_PERMISSION:
if self.is_dataobject(path):
raise IrodsException("Cannot set inheritance to a data object")
else:
raise IrodsException(
"Cannot set inheritance: collection not found")
return False
def get_user_home(self, user=None):
zone = self.get_current_zone(prepend_slash=True)
if user is None:
user = self.get_current_user()
home = self.variables.get('home', 'home')
if home.startswith(zone):
home = home[len(zone):]
path = os.path.join(zone, home.lstrip('/'), user)
return path
# if user == self.variables.get('user'):
# home = self.variables.get('home')
# else:
# home = os.path.join('home', user)
# if home.startswith("/"):
# if home.startswith(zone):
# home = home[len(zone):]
# else:
# home = home[1:]
# path = os.path.join(zone, home.lstrip('/'))
# return path
def get_current_user(self):
return self.prc.username
def get_current_zone(self, prepend_slash=False, suffix=None):
zone = self.prc.zone
has_suffix = suffix is not None
if prepend_slash or has_suffix:
zone = '/' + zone
if has_suffix:
return zone + '/' + suffix
else:
return zone
@lru_cache(maxsize=4)
def get_user_info(self, username=None):
if username is None:
username = self.get_current_user()
try:
user = self.prc.users.get(username)
data = {}
data["id"] = user.id
data["name"] = user.name
data["type"] = user.type
data["zone"] = user.zone
# data["info"] = ""
# data["comment"] = ""
# data["create time"] = ""
# data["modify time"] = ""
data["account"] = user.manager.sess.pool.account.__dict__
results = self.prc.query(UserGroup.name).filter(
User.name == user.name).get_results()
groups = []
for obj in results:
for _, grp in obj.items():
groups.append(grp)
data['groups'] = groups
return data
except iexceptions.UserDoesNotExist:
return None
def user_has_group(self, username, groupname):
info = self.get_user_info(username)
if info is None:
return False
if 'groups' not in info:
return False
return groupname in info['groups']
# TODO: merge the two following 'user_exists'
def check_user_exists(self, username, checkGroup=None):
userdata = self.get_user_info(username)
if userdata is None:
return False, "User %s does not exist" % username
if checkGroup is not None:
if checkGroup not in userdata['groups']:
return False, "User %s is not in group %s" %\
(username, checkGroup)
return True, "OK"
def query_user_exists(self, user):
results = self.prc.query(User.name).filter(User.name == user).first()
if results is None:
return False
elif results[User.name] == user:
return True
else:
raise AttributeError("Failed to query")
def get_metadata(self, path):
try:
if (self.is_collection(path)):
obj = self.prc.collections.get(path)
else:
obj = self.prc.data_objects.get(path)
data = {}
units = {}
for meta in obj.metadata.items():
name = meta.name
data[name] = meta.value
units[name] = meta.units
return data, units
except (
iexceptions.CollectionDoesNotExist,
iexceptions.DataObjectDoesNotExist
):
raise IrodsException("Cannot extract metadata, object not found")
def remove_metadata(self, path, key):
if (self.is_collection(path)):
obj = self.prc.collections.get(path)
else:
obj = self.prc.data_objects.get(path)
tmp = None
for meta in obj.metadata.items():
if key == meta.name:
tmp = meta
break
# print(tmp)
if tmp is not None:
obj.metadata.remove(tmp)
def set_metadata(self, path, **meta):
try:
if (self.is_collection(path)):
obj = self.prc.collections.get(path)
else:
obj = self.prc.data_objects.get(path)
for key, value in meta.items():
obj.metadata.add(key, value)
except iexceptions.CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME:
raise IrodsException("This metadata already exist")
except iexceptions.DataObjectDoesNotExist:
raise IrodsException("Cannot set metadata, object not found")
def get_user_from_dn(self, dn):
results = self.prc.query(User.name, UserAuth.user_dn) \
.filter(UserAuth.user_dn == dn).first()
if results is not None:
return results.get(User.name)
else:
return None
def create_user(self, user, admin=False):
if user is None:
log.error("Asking for NULL user...")
return False
user_type = 'rodsuser'
if admin:
user_type = 'rodsadmin'
try:
user_data = self.prc.users.create(user, user_type)
log.info("Created user: %s", user_data)
except iexceptions.CATALOG_ALREADY_HAS_ITEM_BY_THAT_NAME:
log.warning("User %s already exists in iRODS", user)
return False
return True
def modify_user_password(self, user, password):
log.debug("Changing %s password", user)
return self.prc.users.modify(user, 'password', password)
def remove_user(self, user_name):
user = self.prc.users.get(user_name)
log.warning("Removing user: %s", user_name)
return user.remove()
def list_user_attributes(self, user):
try:
data = self.prc.query(
User.id, User.name, User.type, User.zone
).filter(User.name == user).one()
except iexceptions.NoResultFound:
return None
try:
auth_data = self.prc.query(
UserAuth.user_dn
).filter(UserAuth.user_id == data[User.id]).one()
dn = auth_data.get(UserAuth.user_dn)
except iexceptions.NoResultFound:
dn = None
return {
'name': data[User.name],
'type': data[User.type],
'zone': data[User.zone],
'dn': dn
}
def modify_user_dn(self, user, dn, zone):
# addAuth / rmAuth
self.prc.users.modify(user, 'addAuth', dn)
# self.prc.users.modify(user, 'addAuth', dn, user_zone=zone)
def rule(self, name, body, inputs, output=False):
import textwrap
rule_body = textwrap.dedent('''\
%s {{
%s
}}''' % (name, body))
outname = None
if output:
outname = 'ruleExecOut'
myrule = Rule(self.prc, body=rule_body, params=inputs, output=outname)
try:
raw_out = myrule.execute()
except BaseException as e:
msg = 'Irule failed: %s' % e.__class__.__name__
log.error(msg)
log.warning(e)
# raise IrodsException(msg)
raise e
else:
log.debug("Rule %s executed: %s", name, raw_out)
# retrieve out buffer
if output and len(raw_out.MsParam_PI) > 0:
out_array = raw_out.MsParam_PI[0].inOutStruct
# print("out array", out_array)
import re
file_coding = 'utf-8'
buf = out_array.stdoutBuf.buf
if buf is not None:
# it's binary data (BinBytesBuf) so must be decoded
buf = buf.decode(file_coding)
buf = re.sub(r'\s+', '', buf)
buf = re.sub(r'\\x00', '', buf)
buf = buf.rstrip('\x00')
log.debug("Out buff: %s", buf)
err_buf = out_array.stderrBuf.buf
if err_buf is not None:
err_buf = err_buf.decode(file_coding)
err_buf = re.sub(r'\s+', '', err_buf)
log.debug("Err buff: %s", err_buf)
return buf
return raw_out
"""
# EXAMPLE FOR IRULE: #METADATA RULE
object_path = "/sdcCineca/home/httpadmin/tmp.txt"
test_name = 'paolo2'
inputs = { # extra quotes for string literals
'*object': '"%s"' % object_path,
'*name': '"%s"' % test_name,
'*value': '"%s"' % test_name,
}
body = \"\"\"
# add metadata
*attribute.*name = *value;
msiAssociateKeyValuePairsToObj(*attribute, *object, "-d")
\"\"\"
output = imain.irule('test', body, inputs, 'ruleExecOut')
print("TEST", output)
# log.pp(output)
"""
def ticket(self, path):
ticket = Ticket(self.prc)
# print("TEST", self.prc, path)
ticket.issue('read', path)
return ticket
def ticket_supply(self, code):
# use ticket for access
ticket = Ticket(self.prc, code)
ticket.supply()
def test_ticket(self, path):
# self.ticket_supply(code)
try:
with self.prc.data_objects.open(path, 'r') as obj:
obj.__class__.__name__
except iexceptions.SYS_FILE_DESC_OUT_OF_RANGE:
return False
else:
return True
def stream_ticket(self, path, headers=None):
obj = self.prc.data_objects.open(path, 'r')
return Response(
stream_with_context(
self.read_in_chunks(obj, self.chunk_size)),
headers=headers,
)
def list_tickets(self, user=None):
from irods.models import Ticket, DataObject
try:
data = self.prc.query(
# Ticket.id,
Ticket.string, Ticket.type, User.name, DataObject.name,
Ticket.uses_limit, Ticket.uses_count,
Ticket.expiration
).all()
# ).filter(User.name == user).one()
# for obj in data:
# print("TEST", obj)
# # for _, grp in obj.items():
except iexceptions.NoResultFound:
return None
else:
return data
# ####################################################
# ####################################################
# ####################################################
# FROM old client.py:
# ####################################################
# ####################################################
# ####################################################
# def query_icat(self, query, key):
# com = 'iquest'
# args = ["%s" % query]
# output = self.basic_icom(com, args)
# log.debug("%s query: [%s]\n%s" % (com, query, output))
# if 'CAT_NO_ROWS_FOUND' in output:
# return None
# return output.split('\n')[0].lstrip("%s = " % key)
# def query_user(self, select="USER_NAME", where="USER_NAME", field=None):
# query = "SELECT %s WHERE %s = '%s'" % (select, where, field)
# return self.query_icat(query, select)
# def get_base_dir(self):
# com = "ipwd"
# iout = self.basic_icom(com).strip()
# log.very_verbose("Base dir is %s" % iout)
# return iout
# ############################################
# # ######### Resources Management ###########
# ############################################
# # for resources use this object manager:
# # self.prc.resources
# def list_resources(self):
# com = 'ilsresc'
# iout = self.basic_icom(com).strip()
# log.debug("Resources %s" % iout)
# return iout.split("\n")
# def get_base_resource(self):
# resources = self.list_resources()
# if len(resources) > 0:
# return resources[0]
# return None
# def get_resources_from_file(self, filepath):
# output = self.list(path=filepath, detailed=True)
# resources = []
# for elements in output:
# # elements = line.split()
# if len(elements) < 3:
# continue
# resources.append(elements[2])
# log.debug("%s: found resources %s" % (filepath, resources))
# return resources
# def admin(self, command, user=None, extra=None):
# """
# Admin commands to manage users and stuff like that.
# Note: it will give irods errors if current user has not privileges.
# """
# com = 'iadmin'
# args = [command]
# if user is not None:
# args.append(user)
# if extra is not None:
# args.append(extra)
# log.debug("iRODS admininistration command '%s'" % command)
# return self.basic_icom(com, args)
# def admin_list(self):
# """
# How to explore collections in a debug way
# """
# return self.admin('ls')
# # FIXME:
# def get_current_user_environment(self):
# com = 'ienv'
# output = self.basic_icom(com)
# print("ENV IS", output)
# return output
# def current_location(self, ifile):
# """
# irods://130.186.13.14:1247/cinecaDMPZone/home/pdonorio/replica/test2
# """
# protocol = 'irods'
# URL = "%s://%s:%s%s" % (
# protocol,
# self._current_environment['IRODS_HOST'],
# self._current_environment['IRODS_PORT'],
# os.path.join(self._base_dir, ifile))
# return URL
# def get_resource_from_dataobject(self, ifile):
# """ The attribute of resource from a data object """
# details = self.list(ifile, True)
# resources = []
# for element in details:
# # 2nd position is the resource in irods ils -l
# resources.append(element[2])
# return resources
# def get_resources_admin(self):
# resources = []
# out = self.admin(command='lr')
# if isinstance(out, str):
# resources = out.strip().split('\n')
# return resources
# def get_default_resource_admin(self, skip=['bundleResc']):
# # FIXME: find out the right way to get the default irods resource
# # note: we could use ienv
# resources = self.get_resources_admin()
# if len(resources) > 0:
# # Remove strange resources
# for element in skip:
# if element in resources:
# resources.pop(resources.index(element))
# return list(resources)[::-1].pop()
# return None
# def handle_collection_path(self, ipath):
# """
# iRODS specific pattern to handle paths
# """
# home = self.get_base_dir()
# # Should add the base dir if doesn't start with /
# if ipath is None or ipath == '':
# ipath = home
# elif ipath[0] != '/':
# ipath = home + '/' + ipath
# else:
# current_zone = self.get_current_zone()
# if not ipath.startswith('/' + current_zone):
# # Add the zone
# ipath = '/' + current_zone + ipath
# # Append / if missing in the end
# if ipath[-1] != '/':
# ipath += '/'
# return ipath
# def get_irods_path(self, collection, filename=None):
# path = self.handle_collection_path(collection)
# if filename is not None:
# path += filename
# return path
# # def get_default_user(self):
# # return IRODS_DEFAULT_USER
# def translate_graph_user(self, graph, graph_user):
# from restapi.services.irods.translations import Irods2Graph
# return Irods2Graph(graph, self).graphuser2irodsuser(graph_user)
# ################################################
# ################################################
# # NEED TO CHECK ALL OF THIS ICOMMANDS BELOW
# ################################################
# ################################################
# def search(self, path, like=True):
# com = "ilocate"
# if like:
# path += '%'
# log.debug("iRODS search for %s" % path)
# # Execute
# out = self.execute_command(com, path)
# content = out.strip().split('\n')
# print("TEST", content)
# return content
# def replica(self, dataobj, replicas_num=1, resOri=None, resDest=None):
# """ Replica
# Replicate a file in iRODS to another storage resource.
# Note that replication is always within a zone.
# """
# com = "irepl"
# if resOri is None:
# resOri = self.first_resource
# if resDest is None:
# resDest = self.second_resource
# args = [dataobj]
# args.append("-P") # debug copy
# args.append("-n")
# args.append(replicas_num)
# # Ori
# args.append("-S")
# args.append(resOri)
# # Dest
# args.append("-R")
# args.append(resDest)
# return self.basic_icom(com, args)
# def replica_list(self, dataobj):
# return self.get_resource_from_dataobject(dataobj)
def get_and_verify_irods_session(function, parameters):
obj = None
username = parameters.get('user')
try:
obj = function(**parameters)
except iexceptions.CAT_INVALID_USER:
log.warning("Invalid user: %s", username)
except iexceptions.UserDoesNotExist:
log.warning("Invalid iCAT user: %s", username)
except iexceptions.CAT_INVALID_AUTHENTICATION:
log.warning("Invalid password for %s", username)
# This problem below should not happen anymore
# except iexceptions.MultipleResultsFound:
# raise IrodsException(
# "User %s belonging to multiple iRODS zones" % username)
except BaseException as e:
log.warning("Failed with unknown reason:\n[%s] \"%s\"", type(e), e)
error = \
'Failed to verify credentials against B2SAFE. ' + \
'Unknown error: '
if str(e).strip() == '':
error += e.__class__.__name__
else:
            error += str(e)
raise IrodsException(error)
return obj
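# A minimal usage sketch (illustrative, not part of the original module):
# the helper above wraps any session factory, logs known iRODS
# authentication failures as warnings, and re-raises anything else as an
# IrodsException. The factory and parameter names below are hypothetical.
#
#   def open_session():
#       params = {'user': 'alice', 'password': 'secret', 'zone': 'tempZone'}
#       session = get_and_verify_irods_session(iRODSSession, params)
#       if session is None:
#           ...  # a known CAT_* / UserDoesNotExist failure was logged
#       return session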
| 33.426939 | 87 | 0.537242 |
a97adecdf0a8f6ae3f59c4da2cdf1a1c4839ef71 | 579 | py | Python | Playground/bubble_sort.py | jonasluz/mia-cana | 834550622a2968c5c3f8a2a90f6e9cd41603ee57 | ["Unlicense"] | null | null | null | Playground/bubble_sort.py | jonasluz/mia-cana | 834550622a2968c5c3f8a2a90f6e9cd41603ee57 | ["Unlicense"] | null | null | null | Playground/bubble_sort.py | jonasluz/mia-cana | 834550622a2968c5c3f8a2a90f6e9cd41603ee57 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 15:38:56 2017
@author: Jonas Luz Jr. <unifor@jonasluz.com>
Implementation of the bubble sort algorithm.
"""
def bubbleSort(values: list) -> list:
"""
    Bubble sort method (inefficient).
"""
for i in range(0, len(values)-1):
for j in range(len(values)-1, i, -1):
if values[j] < values[j-1]:
values[j], values[j-1] = values[j-1], values[j]
return values
## Test
#
print(bubbleSort([1,5,2,10,4,7,9,3,2,6,0,12]))
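# Illustrative variant (not in the original file): the classic early-exit
# optimization stops once a full pass makes no swaps, giving O(n) behaviour
# on already-sorted input while keeping the same list-in, list-out contract.
def bubbleSortEarlyExit(values: list) -> list:
    for i in range(0, len(values) - 1):
        swapped = False
        for j in range(len(values) - 1, i, -1):
            if values[j] < values[j - 1]:
                values[j], values[j - 1] = values[j - 1], values[j]
                swapped = True
        if not swapped:  # no swaps this pass: the list is already sorted
            break
    return values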
| 22.269231 | 73 | 0.568221 |
a36e3cae1c05271d7f3d6e7c6b2c3b8c5a5156c5 | 547 | py | Python | 6 kyu/Reverse polish notation calculator.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 6 kyu/Reverse polish notation calculator.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 6 kyu/Reverse polish notation calculator.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | ["MIT"] | null | null | null |
def calc(expr):
    # Plain numeric input (or any valid Python expression) is handled by
    # eval() directly; real RPN strings raise and fall through below.
    try:
        return eval(expr)
    except Exception:
        if not expr:
            return 0
        res = expr.split()
        # Repeatedly resolve the leftmost operator against the two operands
        # that precede it until only the result token remains.
        while "+" in res or "-" in res or "*" in res or "/" in res:
            for i, j in enumerate(res):
                if j in "+-*/":
                    index = i
                    break
            first = res[index - 2]
            second = res[index - 1]
            solution = eval(first + res[index] + second)
            res = res[:index - 2] + [str(solution)] + res[index + 1:]
        return float(res[0])
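# Illustrative checks (not part of the original kata solution): calc() first
# tries eval() for plain numeric input, then falls back to reducing the
# postfix token list one operator at a time.
if __name__ == "__main__":
    assert calc("") == 0                        # empty expression
    assert calc("3") == 3                       # handled by the eval() shortcut
    assert calc("5 1 2 + 4 * + 3 -") == 14.0    # 5 + (1 + 2) * 4 - 3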
| 30.388889 | 67 | 0.429616 |
da7b89012f14c71b389a8a9f21dc9cd04d1c2e25 | 18,946 | py | Python | installCLI.py | oksanagit/installSynApps | fe1c774661901d28c05f4b5791b9821c06f666b7 | ["BSD-3-Clause"] | null | null | null | installCLI.py | oksanagit/installSynApps | fe1c774661901d28c05f4b5791b9821c06f666b7 | ["BSD-3-Clause"] | null | null | null | installCLI.py | oksanagit/installSynApps | fe1c774661901d28c05f4b5791b9821c06f666b7 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
"""Python script for running the installSynApps module through the CLI
usage: installCLI.py [-h] [-y] [-d] [-c CUSTOMCONFIGURE] [-t THREADS] [-s]
[-i INSTALLPATH] [-n] [-p] [-v]
installSynApps for CLI EPICS and synApps auto-compilation
optional arguments:
-h, --help show this help message and exit
-y, --forceyes Add this flag to automatically go through all of the
installation steps without prompts
-d, --dependency Add this flag to install dependencies via a dependency
script.
-c CUSTOMCONFIGURE, --customconfigure CUSTOMCONFIGURE
Use an external configuration directory. Note that it
must have the same structure as the default one
-t THREADS, --threads THREADS
Define a limit on the number of threads that make is
allowed to use
-s, --singlethread Flag that forces make to run on only one thread. Use
this for low power devices.
-i INSTALLPATH, --installpath INSTALLPATH
Define an override install location to use instead of
the one read from INSTALL_CONFIG
-n, --newconfig Add this flag to use installCLI to create a new
install configuration.
-p, --printcommands Add this flag for installCLI to print commands run
during the build process.
-v, --updateversions Add this flag to update module versions based on
github tags. Must be used with -c flag.
"""
# Support python modules
import os
import subprocess
import argparse
import getpass
import sys
from sys import platform
# InstallSynAppsModules
import installSynApps
import installSynApps.DataModel as DataModel
import installSynApps.Driver as Driver
import installSynApps.IO as IO
# pygithub for github autosync tags integration.
WITH_PYGITHUB=True
try:
from github import Github
except ImportError:
WITH_PYGITHUB=False
# -------------- Some helper functions ------------------
def print_welcome_message():
# Welcome message
print(installSynApps.get_welcome_text())
print("Welcome to the installSynApps module.")
print("It is designed to automate the build process for EPICS and areaDetector.")
print("The scripts included will automatically edit all configuration files")
print("required, and then build with make.")
print()
# Make sure we close logger before exiting
def clean_exit():
IO.logger.close_logger()
exit()
def create_new_install_config():
print_welcome_message()
print("You have selected to create a new install configuration.\n")
    install_type = input("Would you like a comprehensive config, an areaDetector config, or a motor config? (AD/Motor/All) > ")
if install_type.lower() == 'ad':
install_template = 'NEW_CONFIG_AD'
print('AreaDetector config selected.\n')
elif install_type.lower() == 'motor':
install_template = 'NEW_CONFIG_MOTOR'
print('Motor config selected.\n')
else:
install_template = 'NEW_CONFIG_ALL'
        print('Comprehensive config selected.\n')
write_loc = input('Where would you like the install configuration to be written? > ')
write_loc = os.path.abspath(write_loc)
print('Target output location set to {}'.format(write_loc))
install_loc = input('What should be the target install location for the config? > ')
print('Attempting to load default config with install location {}...'.format(install_loc))
parser = IO.config_parser.ConfigParser('resources')
install_config, message = parser.parse_install_config(config_filename=install_template, force_location=install_loc, allow_illegal=True)
if install_config is None:
print('Parse Error - {}'.format(message))
elif message is not None:
print('Warning - {}'.format(message))
else:
print('Done.')
print('Writing...')
writer = IO.config_writer.ConfigWriter(install_config)
ret, message = writer.write_install_config(filepath=write_loc)
if not ret:
print('Write Error - {}'.format(message))
else:
print()
print('Wrote new install configuration to {}.'.format(write_loc))
print('Please edit INSTALL_CONFIG file to specify build specifications.')
print('Then run ./installCLI.py -c {} to run the install configuration.'.format(write_loc))
def parse_user_input():
path_to_configure = "configure"
parser = argparse.ArgumentParser(description="installSynApps for CLI EPICS and synApps auto-compilation")
config_group = parser.add_argument_group('configuration options')
build_group = parser.add_argument_group('build options')
debug_group = parser.add_argument_group('logging options')
config_group.add_argument('-i', '--installpath', help='Define an override install location to use instead of the one read from INSTALL_CONFIG.')
config_group.add_argument('-c', '--customconfigure', help='Use an external configuration directory. Note that it must have the same structure as the default one.')
config_group.add_argument('-n', '--newconfig', action='store_true', help='Add this flag to use installCLI to create a new install configuration.')
config_group.add_argument('-v', '--updateversions', action='store_true', help='Add this flag to update module versions based on github tags. Must be used with -c flag.')
build_group.add_argument('-y', '--forceyes', action='store_true', help='Add this flag to automatically go through all of the installation steps without prompts.')
build_group.add_argument('-d', '--dependency', action='store_true', help='Add this flag to install dependencies via a dependency script.')
build_group.add_argument('-f', '--flatbinaries', action='store_true', help='Add this flag if you wish for output binary bundles to have a flat format.')
build_group.add_argument('-t', '--threads', help='Define a limit on the number of threads that make is allowed to use.', type=int)
debug_group.add_argument('-l', '--savelog', action='store_true', help='Add this flag to save the build log to a file in the logs/ directory.')
debug_group.add_argument('-m', '--debugmessages', action='store_true', help='Add this flag to enable printing verbose debug messages.')
debug_group.add_argument('-p', '--printcommands', action='store_true', help='Add this flag to print bash/batch commands run by installSynApps.')
arguments = vars(parser.parse_args())
print_welcome_message()
# Two cases where build will not happen, creating new config, and updating versions.
if arguments['newconfig']:
create_new_install_config()
clean_exit()
if arguments['customconfigure'] is not None:
path_to_configure = arguments['customconfigure']
if arguments['updateversions']:
print('Updating module versions for configuration {}'.format(path_to_configure))
if not os.path.exists(os.path.join(path_to_configure, 'INSTALL_CONFIG')):
print("**INSTALL_CONFIG file not found in specified directory!**\nAborting...")
clean_exit()
if not WITH_PYGITHUB:
print("**PyGithub module required for version updates.**")
print("**Install with pip install pygithub**")
print("Exiting...")
clean_exit()
parser = IO.config_parser.ConfigParser(path_to_configure)
install_config, message = parser.parse_install_config(allow_illegal=True)
print('Please enter your github credentials.')
user = input('Username: ')
passwd = getpass.getpass()
sync_tags(user, passwd, install_config, path_to_configure)
print('Done.')
clean_exit()
elif arguments['updateversions']:
print('ERROR - Update versions flag selected but no configure directory given.')
print('Rerun with the -c flag')
print('Aborting...')
clean_exit()
return path_to_configure, arguments['installpath'], arguments
# ----------------- Run the script ------------------------
path_to_configure, force_install_path, args = parse_user_input()
path_to_configure = os.path.abspath(path_to_configure)
yes = args['forceyes']
dep = args['dependency']
single_thread = False
save_log = args['savelog']
show_debug = args['debugmessages']
if args['printcommands']:
IO.logger.toggle_command_printing()
# For a CLI client, we simply use sys.stdout.write for logging.
IO.logger.assign_write_function(sys.stdout.write)
if save_log:
IO.logger.initialize_logger()
threads = args['threads']
if threads is None:
threads = 0
elif threads == 1:
single_thread = True
print('Reading install configuration directory located at: {}...'.format(path_to_configure))
print()
# Parse base config file, make sure that it is valid - ask for user input until it is valid
parser = IO.config_parser.ConfigParser(path_to_configure)
install_config, message = parser.parse_install_config(allow_illegal=True, force_location=force_install_path)
if install_config is None:
print('Error parsing Install Config... {}'.format(message))
exit()
elif message is not None:
loc_ok = False
else:
if not yes and force_install_path is None:
new_loc = input('Install location {} OK. Do you wish to continue with this location? (y/n) > '.format(install_config.install_location))
if new_loc == 'n':
loc = input('Please enter a new install_location > ')
install_config.install_location = loc.strip()
for module in install_config.get_module_list():
module.abs_path = install_config.convert_path_abs(module.rel_path)
if module.name == 'EPICS_BASE':
install_config.base_path = module.abs_path
elif module.name == 'SUPPORT':
install_config.support_path = module.abs_path
elif module.name == 'AREA_DETECTOR':
install_config.ad_path = module.abs_path
elif module.name == 'MOTOR':
install_config.motor_path = module.abs_path
loc_ok = False
else:
loc_ok = True
else:
loc_ok = True
# Loop until a valid location is selected
if not loc_ok:
while install_config.is_install_valid() != 1:
print('**ERROR - Given install location - {} - is not valid**'.format(install_config.install_location))
if install_config.is_install_valid() == 0:
print('**Path does not exist**')
elif install_config.is_install_valid() == -1:
print('**Permission Error**')
new_path = input('Please enter a new install location > ')
install_config.install_location = new_path.strip()
for module in install_config.get_module_list():
module.abs_path = install_config.convert_path_abs(module.rel_path)
if module.name == 'EPICS_BASE':
install_config.base_path = module.abs_path
elif module.name == 'SUPPORT':
install_config.support_path = module.abs_path
elif module.name == 'AREA_DETECTOR':
install_config.ad_path = module.abs_path
# Driver Objects for running through build process
cloner = Driver.clone_driver.CloneDriver(install_config)
updater = Driver.update_config_driver.UpdateConfigDriver(path_to_configure, install_config)
builder = Driver.build_driver.BuildDriver(install_config, threads, one_thread=single_thread)
packager = Driver.packager_driver.Packager(install_config)
if not packager.found_distro and platform != 'win32':
print("WARNING - couldn't import distro pip package. This package is used for better identifying your linux distribution.")
print("Note that the output tarball will use the generic 'linux-x86_64' name if packaging on linux.")
if not yes:
custom_output = input('Would you like to manually input a name to replace the generic one? (y/n) > ')
if custom_output == 'y':
custom_os = input('Please enter a suitable output package name: > ')
packager.OS = custom_os
autogenerator = IO.script_generator.ScriptGenerator(install_config)
# Check to make sure that all dependencies are found
status, message = builder.check_dependencies_in_path()
if not status:
print("** ERROR - could not find {} in environment path - is a dependancy. **".format(message))
print("Please install git, make, wget, and tar, and ensure that they are in the system path.")
print("Critical dependancy error, abort.")
clean_exit()
# Ask user to proceed
print("Ready to start build process with location: {}...".format(install_config.install_location))
if not yes:
response = input("Proceed? (y/n) > ")
else:
response = "y"
if response == "n":
print("Skipping clone + build...")
else:
print()
if not yes:
clone = input("Would you like to clone EPICS and synApps modules? (y/n) > ")
else:
clone = "y"
# Run the clone process
if clone == "y":
print("Cloning EPICS and synApps into {}...".format(install_config.install_location))
print("----------------------------------------------")
unsuccessful = cloner.clone_and_checkout()
if len(unsuccessful) > 0:
for module in unsuccessful:
print("Module {} was either unsuccessfully cloned or checked out.".format(module.name))
if module.name in builder.critical_modules:
print("Critical clone error... abort.")
clean_exit()
print("Check INSTALL_CONFIG file to make sure repositories and versions are valid")
print("----------------------------------------------")
if not yes:
# Update configuration files
update = input("Do you need installSynApps to update configuration files? (y/n) > ")
else:
update = "y"
if update == "y":
print("Updating all RELEASE and configuration files...")
updater.run_update_config()
dep_errors = updater.perform_dependency_valid_check()
for dep_error in dep_errors:
print(dep_error)
# Here the update driver will reorder build to make sure all modules are being built after their dependencies.
print('Reordering module build order to account for intra-module dependencies...')
updater.perform_fix_out_of_order_dependencies()
print("----------------------------------------------")
print("Ready to build EPICS base, support and areaDetector...")
if not dep and not yes:
d = input("Do you need installSynApps to now install dependency packages on this machine? (y/n) > ")
elif dep:
d = "y"
elif yes:
d = 'n'
if d == "y":
print('Acquiring dependencies through dependency script...')
if platform == 'win32':
dep_script_path = os.path.join(path_to_configure, "dependencyInstall.bat")
else:
dep_script_path = os.path.join(path_to_configure, "dependencyInstall.sh")
if not os.path.exists(dep_script_path):
print('Could not find script at {}, skipping...'.format(dep_script_path))
else:
builder.acquire_dependecies(dep_script_path)
if not yes:
# Inform user of number of CPU cores to use and prompt to build
if builder.one_thread:
num_cores = 'one CPU core'
elif builder.threads == 0:
num_cores = 'as many CPU cores as possible'
else:
num_cores = '{} CPU cores'.format(builder.threads)
print("----------------------------------------------")
print('Builder is configured to use {} during compilation...'.format(num_cores))
build = input("Ready to build selected modules... Continue (y/n) > ")
else:
build = "y"
if build == "y":
print("Starting build...")
# Build all
ret, failed_list = builder.build_all()
if ret != 0:
for failed in failed_list:
print('Module {} failed to build, will not package'.format(failed))
if failed in builder.critical_modules:
print("**ERROR - Build failed - {}**".format(message))
print("**Check the INSTALL_CONFIG file to make sure settings and paths are valid**")
print('**Critical build error - abort...**')
clean_exit()
else:
install_config.get_module_by_name(failed).package = "NO"
print("----------------------------------------------")
print("Autogenerating scripts and README file...")
autogenerator.autogenerate_all()
print("Done.")
if ret == 0:
print("Auto-Build of EPICS, synApps, and areaDetector completed successfully.")
else:
print("Auto-Build of EPICS, synApps, and areaDetector completed with some non-critical errors.")
else:
print("Build aborted... Exiting.")
clean_exit()
print()
if not yes:
create_tarball = input('Would you like to create a tarball binary bundle now? (y/n) > ')
else:
create_tarball = 'y'
if create_tarball == 'y':
output_filename = packager.create_bundle_name()
ret = packager.create_package(output_filename, flat_format=args['flatbinaries'])
if ret != 0:
print('ERROR - Failed to create binary bundle. Check install location to make sure it is valid')
clean_exit()
else:
print('Bundle generated at: {}'.format(output_filename))
print()
if not yes:
create_add_on_tarball = input('Would you like to create an add-on tarball to add a module to an existing bundle? (y/n) > ')
else:
create_add_on_tarball = 'n'
if create_add_on_tarball == 'y':
module_name = input('Please enter name of the module you want packaged (All capitals - Ex. ADPROSILICA) > ')
output_filename = packager.create_bundle_name(module_name=module_name)
if output_filename is None:
print('ERROR - No module named {} could be found in current configuration, abort.'.format(module_name))
clean_exit()
ret = packager.create_add_on_package(output_filename, module_name)
print()
if not yes:
    create_opi_dir = input('Would you like to create opi_dir now? (y/n) > ')
else:
create_opi_dir = 'y'
if create_opi_dir == 'y':
ret = packager.create_opi_folder(install_config.install_location)
if ret != 0:
print('ERROR - Failed to create opi bundle.')
clean_exit()
else:
print('OPI screen tarball generated.')
print('Done.')
clean_exit()
| 42.961451 | 175 | 0.650744 |
1bc8887e723c6fac70e3bee53d64daacc71f8fa1 | 160,003 | py | Python | peeringdb_server/models.py | jlamanna/peeringdb | 1eda45ffeb6dbaf5d37545305b57beb16c02653d | ["BSD-2-Clause"] | 224 | 2016-10-13T10:32:33.000Z | 2022-03-23T13:08:48.000Z | peeringdb_server/models.py | jlamanna/peeringdb | 1eda45ffeb6dbaf5d37545305b57beb16c02653d | ["BSD-2-Clause"] | 1,063 | 2016-06-07T02:57:11.000Z | 2022-03-31T00:08:07.000Z | peeringdb_server/models.py | jlamanna/peeringdb | 1eda45ffeb6dbaf5d37545305b57beb16c02653d | ["BSD-2-Clause"] | 92 | 2016-10-22T14:59:40.000Z | 2022-03-26T11:30:12.000Z |
"""
Django model definitions (database schema).
## django-peeringdb
peeringdb_server uses the abstract models from django-peeringdb.
Often, it makes the most sense for a field to be added to the abstraction
in django-peeringdb, so it can be available for people using local snapshots of the databases.
Generally speaking, if the field is to be added to the REST API output,
it should be added through django-peeringdb.
Fields to facilitate internal operations of peeringdb, on the other hand, DO NOT need to be added to django-peeringdb.
## migrations
For concrete models, django-peeringdb and peeringdb_server maintain separate model migrations.
When adding new fields to django-peeringdb make sure migration files for the schema changes exist in both places.
Please open a merge request in peeringdb/django-peeringdb for the field addition as well.
"""
import datetime
import ipaddress
import json
import re
import uuid
from itertools import chain
import django.urls
import django_peeringdb.models as pdb_models
import reversion
from allauth.account.models import EmailAddress, EmailConfirmation
from allauth.socialaccount.models import SocialAccount
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser,
Group,
PermissionsMixin,
UserManager,
)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.mail.message import EmailMultiAlternatives
from django.db import models, transaction
from django.template import loader
from django.utils import timezone
from django.utils.functional import Promise
from django.utils.http import urlquote
from django.utils.translation import override
from django.utils.translation import ugettext_lazy as _
from django_grainy.decorators import grainy_model
from django_grainy.models import Permission, PermissionManager
from django_grainy.util import check_permissions
from django_handleref.models import CreatedDateTimeField, UpdatedDateTimeField
from django_inet.models import ASNField
from passlib.hash import sha256_crypt
from rest_framework_api_key.models import AbstractAPIKey
import peeringdb_server.geo as geo
from peeringdb_server.inet import RdapLookup, RdapNotFoundError
from peeringdb_server.request import bypass_validation
from peeringdb_server.validators import (
validate_address_space,
validate_info_prefixes4,
validate_info_prefixes6,
validate_irr_as_set,
validate_phonenumber,
validate_poc_visible,
validate_prefix_overlap,
)
SPONSORSHIP_LEVELS = (
(1, _("Silver")),
(2, _("Gold")),
(3, _("Platinum")),
(4, _("Diamond")),
)
SPONSORSHIP_CSS = (
(1, "silver"),
(2, "gold"),
(3, "platinum"),
(4, "diamond"),
)
PARTNERSHIP_LEVELS = ((1, _("Data Validation")), (2, _("RIR")))
COMMANDLINE_TOOLS = (
("pdb_renumber_lans", _("Renumber IP Space")),
("pdb_fac_merge", _("Merge Facilities")),
("pdb_fac_merge_undo", _("Merge Facilities: UNDO")),
("pdb_undelete", _("Restore Object(s)")),
)
if settings.TUTORIAL_MODE:
COMMANDLINE_TOOLS += (("pdb_wipe", _("Reset Environment")),)
COMMANDLINE_TOOLS += (("pdb_ixf_ixp_member_import", _("IX-F Import")),)
def debug_mail(*args):
for arg in list(args):
print(arg)
print("-----------------------------------")
def make_relation_filter(field, filt, value, prefix=None):
if prefix:
field = re.sub("^%s__" % prefix, "", field)
field = re.sub("^%s_" % prefix, "", field)
if field == prefix:
field = "id"
if filt:
filt = {f"{field}__{filt}": value}
else:
filt = {field: value}
filt.update(status="ok")
return filt
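# A minimal sketch of what the helper above produces (illustrative, not part
# of the original module):
#
#   make_relation_filter("network", "in", [1, 2])
#       -> {"network__in": [1, 2], "status": "ok"}
#   make_relation_filter("ixlan_id", None, 7, prefix="ixlan")
#       -> {"id": 7, "status": "ok"}   # the "ixlan_" prefix is stripped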
def validate_PUT_ownership(permission_holder, instance, data, fields):
"""
Helper function that checks if a user or API key has write perms to
the instance provided as well as write perms to any
child instances specified by fields as they exist on
the model and in data.
Example:
validate_PUT_ownership(
request.user,
network_contact,
{
"network": 123,
...
},
["network"]
)
will check that the user has write perms to
1. <NetworkContact> network_contact
2. <Network> network_contact.network
3. <Network> network(id=123)
    If any of these checks fail, False is returned.
"""
if not check_permissions(permission_holder, instance, "u"):
return False
for fld in fields:
if fld == "net":
field_name = "network"
elif fld == "fac":
field_name = "facility"
else:
field_name = fld
a = getattr(instance, field_name)
try:
s_id = int(data.get(fld, data.get("%s_id" % fld)))
except ValueError:
continue
if a.id != s_id:
try:
other = a.__class__.objects.get(id=s_id)
if not check_permissions(permission_holder, other, "u"):
return False
except ValueError: # if id is not intable
return False
return True
def is_suggested(entity):
"""
Check if the network, facility or exchange is a suggested
    entity (i.e., is it a member of the organization designated to
hold suggested entities).
"""
# if no org is specified, entity suggestion is turned
# off
if not getattr(settings, "SUGGEST_ENTITY_ORG", 0):
return False
org_id = getattr(entity, "org_id", 0)
return org_id == settings.SUGGEST_ENTITY_ORG
class UTC(datetime.tzinfo):
"""
UTC+0 tz for tz aware datetime fields.
"""
def utcoffset(self, d):
return datetime.timedelta(seconds=0)
class URLField(pdb_models.URLField):
"""
Local defaults for URLField.
"""
class ValidationErrorEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ValidationError):
if hasattr(obj, "error_dict"):
return obj.error_dict
return obj.message
elif isinstance(obj, Promise):
return f"{obj}"
return super().default(obj)
class ProtectedAction(ValueError):
def __init__(self, obj):
super().__init__(obj.not_deletable_reason)
self.protected_object = obj
class ProtectedMixin:
"""
Mixin that implements checks for changing
/ deleting a model instance that will block
such actions under certain circumstances.
"""
@property
def deletable(self):
"""
Should return whether the object is currently
in a state where it can safely be soft-deleted.
If not deletable, should specify reason in
`_not_deletable_reason` property.
If deletable, should set `_not_deletable_reason`
property to None.
"""
return True
@property
def not_deletable_reason(self):
return getattr(self, "_not_deletable_reason", None)
def delete(self, hard=False, force=False):
if self.status in ["ok", "pending"]:
if not self.deletable and not force:
raise ProtectedAction(self)
self.delete_cleanup()
return super().delete(hard=hard)
def delete_cleanup(self):
"""
Runs cleanup before delete.
Override this in the class that uses this mixin (if needed).
"""
return
def save_without_timestamp(self):
self._meta.get_field("updated").auto_now = False
try:
self.save()
finally:
self._meta.get_field("updated").auto_now = True
class GeocodeBaseMixin(models.Model):
"""
Mixin to use for geocode enabled entities.
Allows an entity to be geocoded with the pdb_geo_sync command.
"""
geocode_status = models.BooleanField(
default=False,
help_text=_(
"Has this object's address been normalized with a call to the Google Maps API"
),
)
geocode_date = models.DateTimeField(
blank=True, null=True, help_text=_("Last time of attempted geocode")
)
class Meta:
abstract = True
@property
def geocode_coordinates(self):
"""
Return a tuple holding the latitude and longitude.
"""
if self.latitude is not None and self.longitude is not None:
return (self.latitude, self.longitude)
return None
@property
def geocode_address(self):
"""
Returns an address string suitable for geo API query.
"""
# pylint: disable=missing-format-attribute
return "{e.address1} {e.address2}, {e.city}, {e.state} {e.zipcode}".format(
e=self
)
def process_geo_location(self, geocode=True, save=True):
"""
Sets longitude and latitude.
Will return a dict containing normalized address
data.
"""
melissa = geo.Melissa(settings.MELISSA_KEY, timeout=5)
gmaps = geo.GoogleMaps(settings.GOOGLE_GEOLOC_API_KEY, timeout=5)
# geocode using google
use_melissa_coords = False
try:
if geocode:
gmaps.geocode(self)
except geo.Timeout:
raise ValidationError(_("Geo coding timed out"))
except geo.RequestError as exc:
raise ValidationError(_("Geo coding failed: {}").format(exc))
except geo.NotFound:
use_melissa_coords = True
# address normalization using melissa
#
# note: `sanitized` will be an empty dict if melissa
# could not normalize a valid address
try:
sanitized = melissa.sanitize_address_model(self)
except geo.Timeout:
raise ValidationError(_("Geo location lookup timed out"))
except geo.RequestError as exc:
raise ValidationError(_("Geo location lookup failed: {}").format(exc))
# update latitude and longitude
if use_melissa_coords and sanitized:
self.latitude = sanitized["latitude"]
self.longitude = sanitized["longitude"]
if geocode and (not use_melissa_coords or sanitized):
self.geocode_status = True
self.geocode_date = datetime.datetime.now(datetime.timezone.utc)
if sanitized:
sanitized["geocode_status"] = True
sanitized["geocode_date"] = self.geocode_date
if save:
self.save()
return sanitized
class GeoCoordinateCache(models.Model):
"""
Stores geocoordinates for address lookups.
"""
country = pdb_models.CountryField()
city = models.CharField(max_length=255, null=True, blank=True)
address1 = models.CharField(max_length=255, null=True, blank=True)
state = models.CharField(max_length=255, null=True, blank=True)
zipcode = models.CharField(max_length=255, null=True, blank=True)
latitude = models.DecimalField(
_("Latitude"), max_digits=9, decimal_places=6, null=True, blank=True
)
longitude = models.DecimalField(
_("Longitude"), max_digits=9, decimal_places=6, null=True, blank=True
)
fetched = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "peeringdb_geocoord_cache"
verbose_name = _("Geocoordinate Cache")
verbose_name_plural = _("Geocoordinate Cache Entries")
@classmethod
def request_coordinates(cls, **kwargs):
address_fields = [
"address1",
"zipcode",
"state",
"city",
"country",
]
# we only request geo-coordinates if country and
# city/state are specified
if not kwargs.get("country"):
return None
if not kwargs.get("city") and not kwargs.get("state"):
return None
# address string passed to google for lookup
address = []
# filters passed to GeoCoordinateCache for cache retrieval
filters = {}
# attributes passed to GeoCoordinateCache for cache creation
params = {}
# prepare geo-coordinate filters, params and lookup
for field in address_fields:
value = kwargs.get(field, None)
if value and isinstance(value, list):
value = value[0]
if field != "country" and value:
address.append(f"{value}")
else:
country = value
params[field] = value
if value:
filters[field] = value
else:
filters[f"{field}__isnull"] = True
# attempt to retrieve a valid cache
cache = cls.objects.filter(**filters).order_by("-fetched").first()
if cache:
tdiff = timezone.now() - cache.fetched
# check if cache is past expiry date, and expire it if so
if tdiff.total_seconds() > settings.GEOCOORD_CACHE_EXPIRY:
cache.delete()
cache = None
if not cache:
# valid geo-coord cache does not exist, request coordinates
# from google and create a cache entry
address = " ".join(address)
google = geo.GoogleMaps(settings.GOOGLE_GEOLOC_API_KEY)
try:
if params.get("address1"):
typ = "premise"
elif params.get("zipcode"):
typ = "postal"
elif params.get("city"):
typ = "city"
elif params.get("state"):
typ = "state"
else:
typ = "country"
coords = google.geocode_address(address, country, typ=typ)
cache = cls.objects.create(
latitude=coords["lat"], longitude=coords["lng"], **params
)
except geo.NotFound:
# google could not find address
# we still create a cache entry with null coordinates.
cls.objects.create(**params)
raise
return {"longitude": cache.longitude, "latitude": cache.latitude}
class UserOrgAffiliationRequest(models.Model):
"""
    Whenever a user requests to be affiliated to an Organization
    through an ASN, the request is stored in this object.
    When an ASN is entered that is not yet in the database, PDB staff
    is notified.
    When an ASN is entered that is already in the database, the organization
    administration is notified and can then approve or deny
Please look at signals.py for the logic of notification as
well as deriving the organization from the ASN during creation.
"""
org = models.ForeignKey(
"peeringdb_server.Organization",
on_delete=models.CASCADE,
null=True,
blank=True,
help_text=_(
"This organization in our database that was derived from the provided ASN or organization name. If this is empty it means no matching organization was found."
),
related_name="affiliation_requests",
)
org_name = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=_("The organization name entered by the user"),
)
asn = ASNField(help_text=_("The ASN entered by the user"), null=True, blank=True)
user = models.ForeignKey(
"peeringdb_server.User",
on_delete=models.CASCADE,
help_text=_("The user that made the request"),
related_name="affiliation_requests",
)
created = CreatedDateTimeField()
status = models.CharField(
max_length=254,
choices=[
("pending", _("Pending")),
("approved", _("Approved")),
("denied", _("Denied")),
("canceled", _("Canceled")),
],
help_text=_("Status of this request"),
)
class Meta:
db_table = "peeringdb_user_org_affil_request"
verbose_name = _("User to Organization Affiliation Request")
verbose_name_plural = _("User to Organization Affiliation Requests")
@property
def name(self):
"""
        If org is set, returns the org's name; otherwise returns the
        value specified in self.org_name.
"""
if self.org_id:
return self.org.name
elif self.org_name:
return self.org_name
return self.asn
def approve(self):
"""
Approve request and add user to org's usergroup.
"""
if self.org_id:
if self.user.is_org_admin(self.org) or self.user.is_org_member(self.org):
self.delete()
return
if (
self.org.admin_usergroup.user_set.count() > 0
or self.org.usergroup.user_set.count() > 0
):
# if there are other users in this org, add user as normal
# member
self.org.usergroup.user_set.add(self.user)
else:
# if there are no other users in this org, add user as admin
# member
self.org.admin_usergroup.user_set.add(self.user)
# we set user to verified
if not self.user.is_verified_user:
self.user.set_verified()
# since it was approved, we don't need to keep the
# request item around
self.status = "approved"
self.delete()
def deny(self):
"""
Deny request, marks request as denied and keeps
it around until requesting user deletes it.
"""
if self.user and self.org:
if self.user.is_org_admin(self.org) or self.user.is_org_member(self.org):
self.delete()
return
self.status = "denied"
self.save()
def cancel(self):
"""
Deny request, marks request as canceled and keeps
it around until requesting user deletes it.
"""
self.status = "canceled"
self.save()
def notify_ownership_approved(self):
"""
Sends a notification email to the requesting user.
"""
if not self.org:
return
# FIXME: why not have the `override` call in email_user in the first place?
with override(self.user.locale):
self.user.email_user(
_('Your affiliation to Organization "{}" has been approved').format(
self.org.name
),
loader.get_template(
"email/notify-user-uoar-ownership-approved.txt"
).render(
{
"uoar": self,
"org_url": f"{settings.BASE_URL}/org/{self.org.id}",
"support_email": settings.DEFAULT_FROM_EMAIL,
}
),
)
class VerificationQueueItem(models.Model):
"""
Keeps track of new items created that need to be reviewed and approved
by administrators.
Queue items are added through the create signals tied to the various
objects (peeringdb_server/signals.py).
"""
# reference to the item that requires review stored in generic fk
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
item = GenericForeignKey("content_type", "object_id")
user = models.ForeignKey(
"peeringdb_server.User",
on_delete=models.CASCADE,
related_name="vqitems",
null=True,
blank=True,
help_text=_("The item that this queue is attached to was created by this user"),
)
org_key = models.ForeignKey(
"peeringdb_server.OrganizationAPIKey",
on_delete=models.CASCADE,
related_name="vqitems",
null=True,
blank=True,
help_text=_(
"The item that this queue is attached to was created by this organization api key"
),
)
created = CreatedDateTimeField()
notified = models.BooleanField(default=False)
class Meta:
db_table = "peeringdb_verification_queue"
unique_together = (("content_type", "object_id"),)
@classmethod
def get_for_entity(cls, entity):
"""
Returns verification queue item for the provided
entity if it exists or raises a DoesNotExist
exception.
"""
return cls.objects.get(
content_type=ContentType.objects.get_for_model(type(entity)),
object_id=entity.id,
)
@property
def item_admin_url(self):
"""
Return admin url for the object in the verification queue.
"""
return django.urls.reverse(
"admin:%s_%s_change"
% (self.content_type.app_label, self.content_type.model),
args=(self.object_id,),
)
@property
def approve_admin_url(self):
"""
Return admin url for approval of the verification queue item.
"""
return django.urls.reverse(
f"admin:{self._meta.app_label}_{self._meta.model_name}_actions",
args=(self.id, "vq_approve"),
)
@property
def deny_admin_url(self):
"""
Return admin url for denial of the verification queue item.
"""
return django.urls.reverse(
f"admin:{self._meta.app_label}_{self._meta.model_name}_actions",
args=(self.id, "vq_deny"),
)
@reversion.create_revision()
@transaction.atomic()
def approve(self):
"""
Approve the verification queue item.
"""
if hasattr(self.item, "status"):
self.item.status = "ok"
if hasattr(self.item, "vq_approve"):
self.item.vq_approve()
self.item.save()
def deny(self):
"""
Deny the verification queue item.
"""
if hasattr(self.item, "vq_deny"):
self.item.vq_deny()
else:
if hasattr(self.item, "ref_tag"):
self.item.delete(hard=True)
else:
self.item.delete()
class DeskProTicket(models.Model):
subject = models.CharField(max_length=255)
body = models.TextField()
user = models.ForeignKey(
"peeringdb_server.User", on_delete=models.CASCADE, null=True, blank=True
)
email = models.EmailField(_("email address"), null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
published = models.DateTimeField(null=True, blank=True)
deskpro_ref = models.CharField(
max_length=32,
null=True,
blank=True,
help_text=_("Ticket reference on the DeskPRO side"),
)
deskpro_id = models.IntegerField(
null=True, blank=True, help_text=_("Ticket id on the DeskPRO side")
)
class Meta:
verbose_name = _("DeskPRO Ticket")
verbose_name_plural = _("DeskPRO Tickets")
class DeskProTicketCC(models.Model):
"""
Describes a contact to be cc'd on the deskpro ticket.
"""
ticket = models.ForeignKey(
DeskProTicket,
on_delete=models.CASCADE,
related_name="cc_set",
)
email = models.EmailField()
class Meta:
unique_together = (("ticket", "email"),)
verbose_name = _("DeskPRO Ticket CC Contact")
verbose_name_plural = _("Deskpro Ticket CC Contacts")
@grainy_model(namespace="peeringdb.organization")
@reversion.register
class Organization(ProtectedMixin, pdb_models.OrganizationBase, GeocodeBaseMixin):
"""
Describes a peeringdb organization.
"""
# FIXME: change this to ImageField - keep
# FileField for now as the server doesn't have all the
    # dependencies installed (libjpeg / Pillow)
logo = models.FileField(
upload_to="logos_user_supplied/",
null=True,
blank=True,
help_text=_(
"Allows you to upload and set a logo image file for this organization"
),
)
@staticmethod
def autocomplete_search_fields():
return (
"id__iexact",
"name__icontains",
)
def __unicode__(self):
return self.name
def related_label(self):
"""
Used by grappelli autocomplete for representation.
Since grappelli doesn't easily allow one to filter status
during autocomplete lookup, make sure the objects
are marked accordingly in the result.
"""
if self.status == "deleted":
return f"[DELETED] {self}"
return f"{self}"
@property
def search_result_name(self):
"""
This will be the name displayed for quick search matches
of this entity.
"""
return self.name
@property
def admin_url(self):
"""
Return the admin URL for this organization (in /cp).
"""
return django.urls.reverse(
"admin:peeringdb_server_organization_change", args=(self.id,)
)
@property
def view_url(self):
"""
Return the URL to this organizations web view.
"""
return "{}{}".format(
settings.BASE_URL, django.urls.reverse("org-view", args=(self.id,))
)
@property
def deletable(self):
"""
Returns whether or not the organization is currently
in a state where it can be marked as deleted.
        This will be False for organizations for which ANY
        of the following is True:
- has a network under it with status=ok
- has a facility under it with status=ok
- has an exchange under it with status=ok
"""
is_empty = (
self.ix_set_active.count() == 0
and self.fac_set_active.count() == 0
and self.net_set_active.count() == 0
)
if not is_empty:
self._not_deletable_reason = _("Organization has active objects under it.")
return False
elif self.sponsorship and self.sponsorship.active:
self._not_deletable_reason = _(
"Organization is currently an active sponsor. "
"Please contact PeeringDB support to help facilitate "
"the removal of this organization."
)
return False
else:
self._not_deletable_reason = None
return True
@property
def owned(self):
"""
Returns whether or not the organization has been claimed
by any users.
"""
return self.admin_usergroup.user_set.count() > 0
@property
def rdap_collect(self):
"""
        Fetches rdap results for all networks under this org and returns
        them keyed by asn.
"""
r = {}
for net in self.net_set_active:
try:
rdap = RdapLookup().get_asn(net.asn)
if rdap:
r[net.asn] = rdap
except RdapNotFoundError:
pass
return r
@property
def urls(self):
"""
Returns all the websites for the org based on its
website field and the website fields on all the entities it
owns.
"""
rv = []
if self.website:
rv.append(self.website)
for tag in ["ix", "net", "fac"]:
for ent in getattr(self, "%s_set_active" % tag):
if ent.website:
rv.append(ent.website)
return list(set(rv))
@property
def grainy_namespace_manage(self):
"""
Org administrators need CRUD to this namespace in order
to execute administrative actions (user management, user permission
management).
"""
return f"peeringdb.manage_organization.{self.id}"
@property
def pending_affiliations(self):
"""
Returns queryset holding pending affiliations to this
organization.
"""
return self.affiliation_requests.filter(status="pending")
@property
def net_set_active(self):
"""
Returns queryset holding active networks in this organization.
"""
return self.net_set(manager="handleref").filter(status="ok")
@property
def fac_set_active(self):
"""
Returns queryset holding active facilities in this organization.
"""
return self.fac_set(manager="handleref").filter(status="ok")
@property
def ix_set_active(self):
"""
Returns queryset holding active exchanges in this organization.
"""
return self.ix_set(manager="handleref").filter(status="ok")
@property
def group_name(self):
"""
Returns usergroup name for this organization.
"""
return "org.%s" % self.id
@property
def admin_group_name(self):
"""
Returns admin usergroup name for this organization.
"""
return "%s.admin" % self.group_name
@property
def usergroup(self):
"""
Returns the usergroup for this organization.
"""
return Group.objects.get(name=self.group_name)
@property
def admin_usergroup(self):
"""
Returns the admin usergroup for this organization.
"""
return Group.objects.get(name=self.admin_group_name)
@property
def all_users(self):
"""
Returns a set of all users in the org's user and admin groups.
"""
users = {}
for user in self.usergroup.user_set.all():
users[user.id] = user
for user in self.admin_usergroup.user_set.all():
users[user.id] = user
return sorted(list(users.values()), key=lambda x: x.full_name)
@property
def sponsorship(self):
"""
Returns sponsorship object for this organization. If the organization
has no sponsorship ongoing return None.
"""
now = datetime.datetime.now().replace(tzinfo=UTC())
return (
self.sponsorship_set.filter(start_date__lte=now, end_date__gte=now)
.order_by("-start_date")
.first()
)
@classmethod
@reversion.create_revision()
@transaction.atomic()
def create_from_rdap(cls, rdap, asn, org_name=None):
"""
Creates organization from rdap result object.
"""
name = rdap.org_name
if not name:
name = org_name or ("AS%d" % (asn))
if cls.objects.filter(name=name).exists():
return cls.objects.get(name=name), False
else:
org = cls.objects.create(name=name, status="ok")
return org, True
def delete_cleanup(self, hard=False):
for affiliation in self.affiliation_requests.filter(status="pending"):
affiliation.cancel()
def default_time_s():
"""
Returns datetime set to today with a time of 00:00:00.
"""
now = datetime.datetime.now()
return now.replace(hour=0, minute=0, second=0, tzinfo=UTC())
def default_time_e():
"""
Returns datetime set to today with a time of 23:59:59.
"""
now = datetime.datetime.now()
return now.replace(hour=23, minute=59, second=59, tzinfo=UTC())
class OrganizationAPIKey(AbstractAPIKey):
"""
An API Key managed by an organization.
"""
org = models.ForeignKey(
Organization,
on_delete=models.CASCADE,
related_name="api_keys",
)
email = models.EmailField(
_("email address"), max_length=254, null=False, blank=False
)
class Meta(AbstractAPIKey.Meta):
verbose_name = "Organization API key"
verbose_name_plural = "Organization API keys"
db_table = "peeringdb_org_api_key"
class OrganizationAPIPermission(Permission):
"""
Describes permission for a OrganizationAPIKey.
"""
class Meta:
verbose_name = _("Organization API key Permission")
verbose_name_plural = _("Organization API key Permission")
base_manager_name = "objects"
org_api_key = models.ForeignKey(
OrganizationAPIKey, related_name="grainy_permissions", on_delete=models.CASCADE
)
objects = PermissionManager()
class Sponsorship(models.Model):
"""
Allows an organization to be marked for sponsorship
for a designated timespan.
"""
orgs = models.ManyToManyField(
Organization,
through="peeringdb_server.SponsorshipOrganization",
related_name="sponsorship_set",
)
start_date = models.DateTimeField(
_("Sponsorship starts on"), default=default_time_s
)
end_date = models.DateTimeField(_("Sponsorship ends on"), default=default_time_e)
notify_date = models.DateTimeField(
_("Expiration notification sent on"), null=True, blank=True
)
level = models.PositiveIntegerField(choices=SPONSORSHIP_LEVELS, default=1)
class Meta:
db_table = "peeringdb_sponsorship"
verbose_name = _("Sponsorship")
verbose_name_plural = _("Sponsorships")
@classmethod
def active_by_org(cls):
"""
Yields (Organization, Sponsorship) for all currently
active sponsorships.
"""
now = datetime.datetime.now().replace(tzinfo=UTC())
qset = cls.objects.filter(start_date__lte=now, end_date__gte=now)
qset = qset.prefetch_related("sponsorshiporg_set")
for sponsorship in qset:
for org in sponsorship.orgs.all():
yield org, sponsorship
@property
def active(self):
now = datetime.datetime.now().replace(tzinfo=UTC())
return self.start_date <= now and self.end_date >= now
@property
def label(self):
"""
Returns the label for this sponsorship's level.
"""
return dict(SPONSORSHIP_LEVELS).get(self.level)
@property
def css(self):
"""
Returns the css class for this sponsorship's level
"""
return dict(SPONSORSHIP_CSS).get(self.level)
def notify_expiration(self):
"""
Sends an expiration notice to SPONSORSHIPS_EMAIL
        A notification is only sent if notify_date is unset or earlier than end_date.
"""
if self.notify_date is not None and self.notify_date >= self.end_date:
return False
msg = loader.get_template(
"email/notify-sponsorship-admin-expiration.txt"
).render({"instance": self})
org_names = ", ".join([org.name for org in self.orgs.all()])
mail = EmailMultiAlternatives(
("{}: {}").format(_("Sponsorship Expired"), org_names),
msg,
settings.DEFAULT_FROM_EMAIL,
[settings.SPONSORSHIPS_EMAIL],
)
mail.attach_alternative(msg.replace("\n", "<br />\n"), "text/html")
mail.send(fail_silently=True)
self.notify_date = datetime.datetime.now(tz=datetime.timezone.utc)
self.save()
return True
class SponsorshipOrganization(models.Model):
"""
Describes an organization->sponsorship relationship.
"""
org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="sponsorshiporg_set"
)
sponsorship = models.ForeignKey(
Sponsorship, on_delete=models.CASCADE, related_name="sponsorshiporg_set"
)
url = models.URLField(
_("URL"),
help_text=_(
"If specified clicking the sponsorship will take the user to this location"
),
blank=True,
null=True,
)
logo = models.FileField(
upload_to="logos/",
null=True,
blank=True,
help_text=_(
"Allows you to upload and set a logo image file for this sponsorship"
),
)
class Partnership(models.Model):
"""
Allows an organization to be marked as a partner.
It will appear on the "partners" page.
"""
org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="partnerships"
)
level = models.PositiveIntegerField(choices=PARTNERSHIP_LEVELS, default=1)
url = models.URLField(
_("URL"),
help_text=_(
"If specified clicking the partnership will take the user to this location"
),
blank=True,
null=True,
)
logo = models.FileField(
upload_to="logos/",
null=True,
blank=True,
help_text=_(
"Allows you to upload and set a logo image file for this partnership"
),
)
class Meta:
db_table = "peeringdb_partnership"
verbose_name = _("Partnership")
verbose_name_plural = _("Partnerships")
@property
def label(self):
return dict(PARTNERSHIP_LEVELS).get(self.level)
class OrganizationMerge(models.Model):
"""
When an organization is merged into another via admin.merge_organizations
it is logged here, allowing the merge to be undone.
"""
from_org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="merged_to"
)
to_org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="merged_from"
)
created = models.DateTimeField(_("Merged on"), auto_now_add=True)
class Meta:
db_table = "peeringdb_organization_merge"
verbose_name = _("Organization Merge")
verbose_name_plural = _("Organization Merges")
def log_entity(self, entity, note=""):
"""
Mark an entity as moved during this particular merge.
Entity can be any handleref instance or a User instance.
"""
return OrganizationMergeEntity.objects.create(
merge=self, entity=entity, note=note
)
def undo(self):
"""
Undo this merge.
"""
# undelete original org
self.from_org.status = "ok"
self.from_org.save()
for row in self.entities.all():
entity = row.entity
tag = getattr(entity, "ref_tag", None)
if tag:
# move handleref entity
entity.org = self.from_org
entity.save()
else:
# move user entity
group = getattr(self.from_org, row.note)
group.user_set.add(entity)
self.to_org.usergroup.user_set.remove(entity)
self.to_org.admin_usergroup.user_set.remove(entity)
self.delete()
class OrganizationMergeEntity(models.Model):
"""
This holds the entities moved during an
organization merge stored in OrganizationMerge.
"""
merge = models.ForeignKey(
OrganizationMerge, on_delete=models.CASCADE, related_name="entities"
)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
entity = GenericForeignKey("content_type", "object_id")
note = models.CharField(max_length=32, blank=True, null=True)
class Meta:
db_table = "peeringdb_organization_merge_entity"
verbose_name = _("Organization Merge: Entity")
verbose_name_plural = _("Organization Merge: Entities")
@grainy_model(namespace="facility", parent="org")
@reversion.register
class Facility(ProtectedMixin, pdb_models.FacilityBase, GeocodeBaseMixin):
"""
Describes a peeringdb facility.
"""
org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="fac_set"
)
website = models.URLField(_("Website"), blank=False)
ix_count = models.PositiveIntegerField(
_("number of exchanges at this facility"),
help_text=_("number of exchanges at this facility"),
null=False,
default=0,
)
net_count = models.PositiveIntegerField(
_("number of networks at this facility"),
help_text=_("number of networks at this facility"),
null=False,
default=0,
)
# FIXME: delete cascade needs to be fixed in django-peeringdb, can remove
# this afterwards
class HandleRef:
tag = "fac"
delete_cascade = ["ixfac_set", "netfac_set"]
class Meta(pdb_models.FacilityBase.Meta):
pass
@staticmethod
def autocomplete_search_fields():
"""
Returns a tuple of field query strings to be used during quick search
query.
"""
return (
"id__iexact",
"name__icontains",
)
@classmethod
def related_to_net(cls, value=None, filt=None, field="network_id", qset=None):
"""
Returns queryset of Facility objects that
are related to the network specified via net_id
Relationship through netfac -> net
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkFacility.handleref.filter(**filt)
return qset.filter(id__in=[i.facility_id for i in q])
@classmethod
def not_related_to_net(cls, value=None, filt=None, field="network_id", qset=None):
"""
Returns queryset of Facility objects that
        are not related to the network specified via net_id
Relationship through netfac -> net
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkFacility.handleref.filter(**filt)
return qset.exclude(id__in=[i.facility_id for i in q])
@classmethod
def related_to_multiple_networks(
cls, value_list=None, field="network_id", qset=None
):
"""
Returns queryset of Facility objects that
are related to ALL networks specified in the value list
(a list of integer network ids).
Used in Advanced Search (ALL search).
Relationship through netfac -> net
"""
if not len(value_list):
raise ValueError("List must contain at least one network id")
if not qset:
qset = cls.handleref.undeleted()
value = value_list.pop(0)
filt = make_relation_filter(field, None, value)
netfac_qset = NetworkFacility.handleref.filter(**filt)
final_queryset = qset.filter(id__in=[nf.facility_id for nf in netfac_qset])
# Need the intersection of the next networks
for value in value_list:
filt = make_relation_filter(field, None, value)
netfac_qset = NetworkFacility.handleref.filter(**filt)
fac_qset = qset.filter(id__in=[nf.facility_id for nf in netfac_qset])
final_queryset = final_queryset & fac_qset
return final_queryset
@classmethod
def related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Returns queryset of Facility objects that
        are related to the exchange specified via ix_id
Relationship through ixfac -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = InternetExchangeFacility.handleref.filter(**filt)
return qset.filter(id__in=[i.facility_id for i in q])
@classmethod
def not_related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Returns queryset of Facility objects that
        are not related to the exchange specified via ix_id
Relationship through ixfac -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = InternetExchangeFacility.handleref.filter(**filt)
return qset.exclude(id__in=[i.facility_id for i in q])
@classmethod
def overlapping_asns(cls, asns, qset=None):
"""
Returns queryset of Facility objects
that have a relationship to all asns specified in `asns`
Relationship through netfac.
Arguments:
- asns <list>: list of asns
Keyword Arguments:
- qset <Facility QuerySet>: if specified use as base query
Returns:
- Facility QuerySet
"""
facilities = {}
shared_facilities = []
count = len(asns)
if count == 1:
raise ValidationError(_("Need to specify at least two asns"))
if count > 25:
raise ValidationError(_("Can only compare a maximum of 25 asns"))
        # first we need to collect all active facilities related to any
# of the specified asns
for asn in asns:
for netfac in NetworkFacility.objects.filter(
network__asn=asn, status="ok"
).select_related("network"):
if netfac.facility_id not in facilities:
facilities[netfac.facility_id] = {}
facilities[netfac.facility_id][asn] = True
# then we check for the facilities that have all of the asns
# peering by comparing the counts of collected asns at each
# facility with the count of asns provided
for fac_id, collected_asns in list(facilities.items()):
if len(list(collected_asns.keys())) == count:
shared_facilities.append(fac_id)
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(id__in=shared_facilities)
@property
def sponsorship(self):
"""
        Returns sponsorship object for this facility (through the owning org).
"""
return self.org.sponsorship
@property
def search_result_name(self):
"""
This will be the name displayed for quick search matches
of this entity.
"""
return self.name
@property
def netfac_set_active(self):
"""
Returns queryset of active NetworkFacility objects connected to this
facility.
"""
return self.netfac_set.filter(status="ok")
@property
def ixfac_set_active(self):
"""
Returns queryset of active InternetExchangeFacility objects connected
to this facility.
"""
return self.ixfac_set.filter(status="ok")
@property
def view_url(self):
"""
Return the URL to this facility's web view.
"""
return "{}{}".format(
settings.BASE_URL, django.urls.reverse("fac-view", args=(self.id,))
)
@property
def deletable(self):
"""
Returns whether or not the facility is currently
in a state where it can be marked as deleted.
This will be False for facilities of which ANY
of the following is True:
- has a network facility under it with status=ok
- has an exchange facility under it with status=ok
"""
if self.ixfac_set_active.exists():
facility_names = ", ".join(
[ixfac.ix.name for ixfac in self.ixfac_set_active.all()[:5]]
)
self._not_deletable_reason = _(
"Facility has active exchange presence(s): {} ..."
).format(facility_names)
return False
elif self.netfac_set_active.exists():
network_names = ", ".join(
[netfac.network.name for netfac in self.netfac_set_active.all()[:5]]
)
self._not_deletable_reason = _(
"Facility has active network presence(s): {} ..."
).format(network_names)
return False
else:
self._not_deletable_reason = None
return True
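# Typical caller pattern (sketch): check `deletable` first, which also
# populates `_not_deletable_reason` as a side effect.
#
#   if not fac.deletable:
#       reason = fac._not_deletable_reason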
def validate_phonenumbers(self):
self.tech_phone = validate_phonenumber(self.tech_phone, self.country.code)
self.sales_phone = validate_phonenumber(self.sales_phone, self.country.code)
@grainy_model(namespace="internetexchange", parent="org")
@reversion.register
class InternetExchange(ProtectedMixin, pdb_models.InternetExchangeBase):
"""
Describes a peeringdb exchange.
"""
ixf_import_request = models.DateTimeField(
_("Manual IX-F import request"),
help_text=_("Date of most recent manual import request"),
null=True,
blank=True,
)
ixf_import_request_status = models.CharField(
_("Manual IX-F import status"),
help_text=_("The current status of the manual ix-f import request"),
choices=(
("queued", _("Queued")),
("importing", _("Importing")),
("finished", _("Finished")),
),
max_length=32,
default="queued",
)
ixf_import_request_user = models.ForeignKey(
"peeringdb_server.User",
null=True,
blank=True,
help_text=_("The user that triggered the manual ix-f import request"),
on_delete=models.SET_NULL,
related_name="requested_ixf_imports",
)
org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="ix_set"
)
fac_count = models.PositiveIntegerField(
_("number of facilities at this exchange"),
help_text=_("number of facilities at this exchange"),
null=False,
default=0,
)
net_count = models.PositiveIntegerField(
_("number of networks at this exchange"),
help_text=_("number of networks at this exchange"),
null=False,
default=0,
)
@staticmethod
def autocomplete_search_fields():
"""
Returns a tuple of field query strings to be used during quick search
query.
"""
return (
"id__iexact",
"name__icontains",
)
def __unicode__(self):
return self.name
@classmethod
def related_to_ixlan(cls, value=None, filt=None, field="ixlan_id", qset=None):
"""
Returns queryset of InternetExchange objects that
are related to the ixlan specified by ixlan_id
Relationship through ixlan.
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value, prefix="ixlan")
q = IXLan.handleref.filter(**filt)
return qset.filter(id__in=[ix.ix_id for ix in q])
@classmethod
def related_to_ixfac(cls, value=None, filt=None, field="ixfac_id", qset=None):
"""
Returns queryset of InternetExchange objects that
are related to the ixfac link specified by ixfac_id
Relationship through ixfac.
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value, prefix="ixfac")
q = InternetExchangeFacility.handleref.filter(**filt)
return qset.filter(id__in=[ix.ix_id for ix in q])
@classmethod
def related_to_fac(cls, filt=None, value=None, field="facility_id", qset=None):
"""
Returns queryset of InternetExchange objects that
are related to the facility specified by fac_id
Relationship through ixfac -> fac
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = InternetExchangeFacility.handleref.filter(**filt)
return qset.filter(id__in=[ix.ix_id for ix in q])
@classmethod
def related_to_net(cls, filt=None, value=None, field="network_id", qset=None):
"""
Returns queryset of InternetExchange objects that
are related to the network specified by network_id
Relationship through netixlan -> ixlan
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkIXLan.handleref.filter(**filt).select_related("ixlan")
return qset.filter(id__in=[nx.ixlan.ix_id for nx in q])
@classmethod
def related_to_multiple_networks(
cls, value_list=None, field="network_id", qset=None
):
"""
Returns queryset of InternetExchange objects that
are related to ALL networks specified in the value list
(a list of integer network ids).
Used in Advanced Search (ALL search).
Relationship through netixlan -> ixlan
"""
if not value_list:
raise ValueError("List must contain at least one network id")
if not qset:
qset = cls.handleref.undeleted()
value = value_list.pop(0)
filt = make_relation_filter(field, None, value)
netixlan_qset = NetworkIXLan.handleref.filter(**filt).select_related("ixlan")
final_queryset = qset.filter(id__in=[nx.ixlan.ix_id for nx in netixlan_qset])
# Need the intersection of the next networks
for value in value_list:
filt = make_relation_filter(field, None, value)
netixlan_qset = NetworkIXLan.handleref.filter(**filt).select_related(
"ixlan"
)
ix_qset = qset.filter(id__in=[nx.ixlan.ix_id for nx in netixlan_qset])
final_queryset = final_queryset & ix_qset
return final_queryset
@classmethod
def not_related_to_net(cls, filt=None, value=None, field="network_id", qset=None):
"""
Returns queryset of InternetExchange objects that
are not related to the network specified by network_id
Relationship through netixlan -> ixlan
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkIXLan.handleref.filter(**filt).select_related("ixlan")
return qset.exclude(id__in=[nx.ixlan.ix_id for nx in q])
@classmethod
def related_to_ipblock(cls, ipblock, qset=None):
"""
Returns queryset of InternetExchange objects that
have ixlan prefixes matching the ipblock specified.
Relationship through ixlan -> ixpfx
"""
if not qset:
qset = cls.handleref.undeleted()
q = IXLanPrefix.objects.select_related("ixlan").filter(
prefix__startswith=ipblock
)
return qset.filter(id__in=[pfx.ixlan.ix_id for pfx in q])
@classmethod
def overlapping_asns(cls, asns, qset=None):
"""
Returns queryset of InternetExchange objects
that have a relationship to all asns specified in `asns`
Relationship through ixlan -> netixlan
Arguments:
- asns <list>: list of asns
Keyword Arguments:
- qset <InternetExchange QuerySet>: if specified use as base query
Returns:
- InternetExchange QuerySet
"""
exchanges = {}
shared_exchanges = []
count = len(asns)
if count == 1:
raise ValidationError(_("Need to specify at least two asns"))
if count > 25:
raise ValidationError(_("Can only compare a maximum of 25 asns"))
# first we need to collect all active exchanges related to any
# of the specified asns
for asn in asns:
for netixlan in NetworkIXLan.objects.filter(
network__asn=asn, status="ok"
).select_related("network", "ixlan"):
if netixlan.ixlan.ix_id not in exchanges:
exchanges[netixlan.ixlan.ix_id] = {}
exchanges[netixlan.ixlan.ix_id][asn] = True
# then we check for the exchanges that have all of the asns
# peering by comparing the counts of collected asns at each
# exchange with the count of asns provided
for ix_id, collected_asns in list(exchanges.items()):
if len(list(collected_asns.keys())) == count:
shared_exchanges.append(ix_id)
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(id__in=shared_exchanges)
@classmethod
def filter_capacity(cls, filt=None, value=None, qset=None):
"""
Returns queryset of InternetExchange objects filtered by capacity
in mbits.
Arguments:
- filt (`str`|`None`): match operation, None meaning exact match
- 'gte': greater than or equal
- 'lte': less than or equal
- 'gt': greater than
- 'lt': less than
- value(`int`): capacity to filter in mbits
- qset(`InternetExchange`): if specified will filter on top of
this existing query set
"""
if not qset:
qset = cls.handleref.undeleted()
# prepare field filters
if filt:
filters = {f"capacity__{filt}": value}
else:
filters = {"capacity": value}
# find exchanges that have the matching capacity
# exchange capacity is simply the sum of its port speeds
netixlans = NetworkIXLan.handleref.undeleted()
capacity_set = (
netixlans.values("ixlan_id")
.annotate(capacity=models.Sum("speed"))
.filter(**filters)
)
# collect ids
# since ixlan id == exchange id we can simply use those
qualifying = [c["ixlan_id"] for c in capacity_set]
# finally limit the queryset by the ix (ixlan) ids that matched
# the capacity filter
qset = qset.filter(id__in=qualifying)
return qset
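# Example (sketch): exchanges whose summed port speeds reach 1 Tbit
# (capacity values are in mbit, per the docstring above).
#
#   big = InternetExchange.filter_capacity(filt="gte", value=1000000)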
@classmethod
def ixf_import_request_queue(cls, limit=0):
qset = InternetExchange.objects.filter(
ixf_import_request__isnull=False, ixf_import_request_status="queued"
).order_by("ixf_import_request")
if limit:
qset = qset[:limit]
return qset
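# Sketch: drain up to five queued manual IX-F import requests,
# oldest request first.
#
#   for ix in InternetExchange.ixf_import_request_queue(limit=5):
#       ...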
@property
def ixlan(self):
"""
Returns the ixlan for this exchange.
As per #21, each exchange will get one ixlan with a matching
id, but the schema is to remain unchanged until a major
version bump.
"""
return self.ixlan_set.first()
@property
def networks(self):
"""
Returns all active networks at this exchange.
"""
networks = []
for ixlan in self.ixlan_set_active:
for netixlan in ixlan.netixlan_set_active:
networks.append(netixlan.network_id)
return list(set(networks))
@property
def search_result_name(self):
"""
This will be the name displayed for quick search matches
of this entity.
"""
return self.name
@property
def ixlan_set_active(self):
"""
Returns queryset of active ixlan objects at this exchange.
"""
return self.ixlan_set(manager="handleref").filter(status="ok")
@property
def ixlan_set_active_or_pending(self):
"""
Returns queryset of active or pending ixlan objects at
this exchange.
"""
return self.ixlan_set(manager="handleref").filter(status__in=["ok", "pending"])
@property
def ixfac_set_active(self):
"""
Returns queryset of active ixfac objects at this exchange.
"""
return (
self.ixfac_set(manager="handleref")
.select_related("facility")
.filter(status="ok")
)
@property
def sponsorship(self):
"""
Returns sponsorship object for this exchange (through owning org).
"""
return self.org.sponsorship
@property
def view_url(self):
"""
Return the URL to this exchange's web view.
"""
return "{}{}".format(
settings.BASE_URL, django.urls.reverse("ix-view", args=(self.id,))
)
@property
def derived_proto_unicast(self):
"""
Returns a value for "proto_unicast" derived from the exchanges's
ixpfx records.
If the ix has a IPv4 ixpfx, proto_unicast should be True.
"""
return self.ixlan.ixpfx_set_active.filter(protocol="IPv4").exists()
@property
def derived_proto_ipv6(self):
"""
Returns a value for "proto_ipv6" derived from the exchanges's
ixpfx records.
If the ix has a IPv6 ixpfx, proto_ipv6 should be True.
"""
return self.ixlan.ixpfx_set_active.filter(protocol="IPv6").exists()
@property
def derived_network_count(self):
"""
Returns an ad hoc count of networks attached to an Exchange.
Used in the deletable property to ensure an accurate count
even if net_count signals are not being used.
"""
return (
NetworkIXLan.objects.select_related("network")
.filter(ixlan__ix_id=self.id, status="ok")
.aggregate(net_count=models.Count("network_id", distinct=True))["net_count"]
)
@property
def deletable(self):
"""
Returns whether or not the exchange is currently
in a state where it can be marked as deleted.
This will be False for exchanges of which ANY
of the following is True:
- has netixlans connected to it
- ixfac relationship
"""
if self.ixfac_set_active.exists():
facility_names = ", ".join(
[ixfac.facility.name for ixfac in self.ixfac_set_active.all()[:5]]
)
self._not_deletable_reason = _(
"Exchange has active facility connection(s): {} ..."
).format(facility_names)
return False
elif self.derived_network_count > 0:
self._not_deletable_reason = _("Exchange has active peer(s)")
return False
else:
self._not_deletable_reason = None
return True
@property
def ixf_import_request_recent_status(self):
"""
Returns the recent ixf import request status as a tuple
of value, display.
"""
if not self.ixf_import_request:
return "", ""
value = self.ixf_import_request_status
display = self.get_ixf_import_request_status_display()
if self.ixf_import_request_status == "queued":
return value, display
now = timezone.now()
delta = (now - self.ixf_import_request).total_seconds()
if delta < 3600:
return value, display
return "", ""
@property
def ixf_import_css(self):
"""
Returns the appropriate bootstrap alert class
depending on recent import request status.
"""
status, _ = self.ixf_import_request_recent_status
if status == "queued":
return "alert alert-warning"
if status == "finished":
return "alert alert-success"
return ""
def vq_approve(self):
"""
Called when internet exchange is approved in verification
queue.
"""
# since we are creating a pending ixland and prefix
# during exchange creation, we need to make sure those
# get approved as well when the exchange gets approved
for ixlan in self.ixlan_set.filter(status="pending"):
ixlan.status = "ok"
ixlan.save()
for ixpfx in ixlan.ixpfx_set.filter(status="pending"):
ixpfx.status = "ok"
ixpfx.save()
def save(self, create_ixlan=True, **kwargs):
"""
When an internet exchange is saved, make sure the ixlan for it
exists.
Keyword Argument(s):
- create_ixlan (`bool`=True): if True and the ix is missing
its ixlan, create it
"""
r = super().save(**kwargs)
if not self.ixlan and create_ixlan:
ixlan = IXLan(ix=self, status=self.status, mtu=0)
# ixlan id will be set to match ix id in ixlan's clean()
# call
ixlan.clean()
ixlan.save()
return r
def validate_phonenumbers(self):
self.tech_phone = validate_phonenumber(self.tech_phone, self.country.code)
self.policy_phone = validate_phonenumber(self.policy_phone, self.country.code)
def clean(self):
self.validate_phonenumbers()
def request_ixf_import(self, user=None):
self.ixf_import_request = timezone.now()
if self.ixf_import_request_status == "importing":
raise ValidationError({"non_field_errors": ["Import is currently ongoing"]})
self.ixf_import_request_status = "queued"
self.ixf_import_request_user = user
self.save_without_timestamp()
@grainy_model(namespace="ixfac", parent="ix")
@reversion.register
class InternetExchangeFacility(pdb_models.InternetExchangeFacilityBase):
"""
Describes facility to exchange relationship.
"""
ix = models.ForeignKey(
InternetExchange, on_delete=models.CASCADE, related_name="ixfac_set"
)
facility = models.ForeignKey(
Facility, on_delete=models.CASCADE, default=0, related_name="ixfac_set"
)
@classmethod
def related_to_name(cls, value=None, filt=None, field="facility__name", qset=None):
"""
Filter queryset of ixfac objects related to facilities with name match
in facility__name according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(**make_relation_filter(field, filt, value))
@classmethod
def related_to_country(
cls, value=None, filt=None, field="facility__country", qset=None
):
"""
Filter queryset of ixfac objects related to country via match
in facility__country according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.filter(status="ok")
return qset.filter(**make_relation_filter(field, filt, value))
@classmethod
def related_to_city(cls, value=None, filt=None, field="facility__city", qset=None):
"""
Filter queryset of ixfac objects related to city via match
in facility__city according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(**make_relation_filter(field, filt, value))
@property
def descriptive_name(self):
"""
Returns a descriptive label of the ixfac for logging purposes.
"""
return f"ixfac{self.id} {self.ix.name} <-> {self.facility.name}"
class Meta:
unique_together = ("ix", "facility")
db_table = "peeringdb_ix_facility"
@grainy_model(namespace="ixlan", namespace_instance="{instance.ix.grainy_namespace}")
@reversion.register
class IXLan(pdb_models.IXLanBase):
"""
Describes a LAN at an exchange.
"""
# as we are preparing to drop IXLans from the schema, as an interim
# step (#21) we are giving each ix one ixlan with matching ids, so we need
# to have an id field that doesn't automatically increment
id = models.IntegerField(primary_key=True)
ix = models.ForeignKey(
InternetExchange, on_delete=models.CASCADE, default=0, related_name="ixlan_set"
)
# IX-F import fields
ixf_ixp_import_enabled = models.BooleanField(default=False)
ixf_ixp_import_error = models.TextField(
_("IX-F error"),
blank=True,
null=True,
help_text=_("Reason IX-F data could not be parsed"),
)
ixf_ixp_import_error_notified = models.DateTimeField(
_("IX-F error notification date"),
help_text=_("Last time we notified the exchange about the IX-F parsing issue"),
null=True,
blank=True,
)
ixf_ixp_import_protocol_conflict = models.IntegerField(
_("IX-F sent IPs for unsupported protocol"),
help_text=_(
"IX has been sending IP addresses for protocol not supported by network"
),
null=True,
blank=True,
default=0,
)
# FIXME: delete cascade needs to be fixed in django-peeringdb, can remove
# this afterwards
class HandleRef:
tag = "ixlan"
delete_cascade = ["ixpfx_set", "netixlan_set"]
class Meta:
db_table = "peeringdb_ixlan"
@classmethod
def api_cache_permissions_applicator(cls, row, ns, permission_holder):
"""
Applies permissions to a row in an api-cache result
set for ixlan.
This will strip `ixf_ixp_member_list_url` fields for
users / api keys that don't have read permissions for them according
to `ixf_ixp_member_list_url_visible`
Argument(s):
- row (dict): ixlan row from api-cache result
- ns (str): ixlan namespace as determined during api-cache
result rendering
- permission_holder (User or API Key)
"""
visible = row.get("ixf_ixp_member_list_url_visible").lower()
if not permission_holder and visible == "public":
return
namespace = f"{ns}.ixf_ixp_member_list_url.{visible}"
if not check_permissions(permission_holder, namespace, "r", explicit=True):
try:
del row["ixf_ixp_member_list_url"]
except KeyError:
pass
@property
def descriptive_name(self):
"""
Returns a descriptive label of the ixlan for logging purposes.
"""
return f"ixlan{self.id} {self.ix.name}"
@property
def ixpfx_set_active(self):
"""
Returns queryset of active prefixes at this ixlan.
"""
return self.ixpfx_set(manager="handleref").filter(status="ok")
@property
def ixpfx_set_active_or_pending(self):
"""
Returns queryset of active or pending prefixes at this ixlan.
"""
return self.ixpfx_set(manager="handleref").filter(status__in=["ok", "pending"])
@property
def netixlan_set_active(self):
"""
Returns queryset of active netixlan objects at this ixlan.
"""
return (
self.netixlan_set(manager="handleref")
.select_related("network")
.filter(status="ok")
)
# q = NetworkIXLan.handleref.filter(ixlan_id=self.id).filter(status="ok")
# return Network.handleref.filter(id__in=[i.network_id for i in
# q]).filter(status="ok")
@staticmethod
def autocomplete_search_fields():
"""
Used by grappelli autocomplete to determine what
fields to search in.
"""
return ("ix__name__icontains",)
def related_label(self):
"""
Used by grappelli autocomplete for representation.
"""
return f"{self.ix.name} IXLan ({self.id})"
def test_ipv4_address(self, ipv4):
"""
Test that the ipv4 address exists in one of the prefixes in this ixlan.
"""
for pfx in self.ixpfx_set_active:
if pfx.test_ip_address(ipv4):
return True
return False
def test_ipv6_address(self, ipv6):
"""
Test that the ipv6 address exists in one of the prefixes in this ixlan.
"""
for pfx in self.ixpfx_set_active:
if pfx.test_ip_address(ipv6):
return True
return False
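# Sketch (placeholder addresses): both helpers delegate to the active
# prefixes' test_ip_address(), so plain strings are accepted.
#
#   ixlan.test_ipv4_address("203.0.113.7")
#   ixlan.test_ipv6_address("2001:db8::7")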
def clean(self):
# id is set and does not match the parent ix id
if self.id and self.id != self.ix.id:
raise ValidationError({"id": _("IXLan id needs to match parent ix id")})
# id is not set (new ixlan)
if not self.id:
# ixlan for ix already exists
if self.ix.ixlan:
raise ValidationError(_("Ixlan for exchange already exists"))
# enforce correct id moving forward
self.id = self.ix.id
if self.ixf_ixp_member_list_url is None and self.ixf_ixp_import_enabled:
raise ValidationError(
_(
"Cannot enable IX-F import without specifying the IX-F member list url"
)
)
return super().clean()
@reversion.create_revision()
@transaction.atomic()
def add_netixlan(self, netixlan_info, save=True, save_others=True):
"""
This function allows for sane adding of netixlan object under
this ixlan.
It will take into account whether an ipaddress can be claimed from a
soft-deleted netixlan or whether an object already exists
that should be updated instead of creating a new netixlan instance.
Arguments:
- netixlan_info (NetworkIXLan): a netixlan instance describing the netixlan
you want to add to this ixlan. Note that this instance will actually
not be saved. It only serves as an information provider.
Keyword Arguments:
- save (bool): if true commit changes to db
- save_others (bool): if true commit changes to other netixlan
objects modified as a side effect (e.g., soft-deleted netixlans
that have an ip address released)
Returns:
- {netixlan, created, changed, log}
"""
log = []
changed = []
created = False
ipv4 = netixlan_info.ipaddr4
ipv6 = netixlan_info.ipaddr6
asn = netixlan_info.asn
ipv4_valid = False
ipv6_valid = False
def result(netixlan=None):
return {
"netixlan": netixlan,
"created": created,
"changed": changed,
"log": log,
}
# check if either of the provided ip addresses are a fit for ANY of
# the prefixes in this ixlan
for pfx in self.ixpfx_set_active:
if pfx.test_ip_address(ipv4):
ipv4_valid = True
if pfx.test_ip_address(ipv6):
ipv6_valid = True
# If neither ipv4 nor ipv6 match any of the prefixes, log the issue
# and bail
if ipv4 and not ipv4_valid:
raise ValidationError(
{"ipaddr4": f"IPv4 {ipv4} does not match any prefix on this ixlan"}
)
if ipv6 and not ipv6_valid:
raise ValidationError(
{"ipaddr6": f"IPv6 {ipv6} does not match any prefix on this ixlan"}
)
# Next we check if an active netixlan with the ipaddress exists in ANOTHER lan, and bail
# if it does.
if (
ipv4
and NetworkIXLan.objects.filter(status="ok", ipaddr4=ipv4)
.exclude(ixlan=self)
.count()
> 0
):
raise ValidationError(
{"ipaddr4": f"Ip address {ipv4} already exists in another lan"}
)
if (
ipv6
and NetworkIXLan.objects.filter(status="ok", ipaddr6=ipv6)
.exclude(ixlan=self)
.count()
> 0
):
raise ValidationError(
{"ipaddr6": f"Ip address {ipv6} already exists in another lan"}
)
# now we need to figure out if the ipaddresses already exist in this ixlan,
# we need to check ipv4 and ipv6 separately as they might exist on different
# netixlan objects.
try:
if ipv4:
netixlan_existing_v4 = NetworkIXLan.objects.get(
ixlan=self, ipaddr4=ipv4
)
else:
netixlan_existing_v4 = None
except NetworkIXLan.DoesNotExist:
netixlan_existing_v4 = None
try:
if ipv6:
netixlan_existing_v6 = NetworkIXLan.objects.get(
ixlan=self, ipaddr6=ipv6
)
else:
netixlan_existing_v6 = None
except NetworkIXLan.DoesNotExist:
netixlan_existing_v6 = None
# once we have that information we determine which netixlan object to use
if netixlan_existing_v4 and netixlan_existing_v6:
# both ips already exist
if netixlan_existing_v4 != netixlan_existing_v6:
# but they exist on different netixlans, so we reset the v6 netixlan
netixlan_existing_v6.ipaddr6 = None
if save:
netixlan_existing_v6.save()
# we use the existing v4 netixlan
netixlan = netixlan_existing_v4
elif netixlan_existing_v4:
# the v4 address exists, but v6 doesn't, so we use the netixlan with the v4 match
netixlan = netixlan_existing_v4
elif netixlan_existing_v6:
# the v6 address exists, but v4 does not so we use the netixlan with the v6 match
netixlan = netixlan_existing_v6
else:
# neither address exists, create a new netixlan object
netixlan = NetworkIXLan(
ixlan=self, network=netixlan_info.network, status="ok"
)
created = True
# now we sync the data to our determined netixlan instance
# IPv4
if ipv4 != netixlan.ipaddr4:
# we need to check if this ipaddress exists on a
# soft-deleted netixlan elsewhere, and
# reset if so.
#
# we only do this if ipaddr4 is not null
if ipv4:
for other in NetworkIXLan.objects.filter(
ipaddr4=ipv4, status="deleted"
).exclude(asn=asn):
other.ipaddr4 = None
other.notes = f"Ip address {ipv4} was claimed by other netixlan"
if save or save_others:
other.save()
netixlan.ipaddr4 = ipv4
changed.append("ipaddr4")
# IPv6
if ipv6 != netixlan.ipaddr6:
# we need to check if this ipaddress exists on a
# soft-deleted netixlan elsewhere, and
# reset if so.
#
# we only do this if ipaddr6 is not None
if ipv6:
for other in NetworkIXLan.objects.filter(
ipaddr6=ipv6, status="deleted"
).exclude(asn=asn):
other.ipaddr6 = None
other.notes = f"Ip address {ipv6} was claimed by other netixlan"
if save or save_others:
other.save()
netixlan.ipaddr6 = ipv6
changed.append("ipaddr6")
# Is the netixlan a route server peer?
if netixlan_info.is_rs_peer != netixlan.is_rs_peer:
netixlan.is_rs_peer = netixlan_info.is_rs_peer
changed.append("is_rs_peer")
# Is the netixlan operational?
if netixlan_info.operational != netixlan.operational:
netixlan.operational = netixlan_info.operational
changed.append("operational")
# Speed
if netixlan_info.speed != netixlan.speed and (
netixlan_info.speed >= 0 or netixlan.speed is None
):
netixlan.speed = netixlan_info.speed
changed.append("speed")
# ASN
if netixlan_info.asn != netixlan.asn:
netixlan.asn = netixlan_info.asn
changed.append("asn")
# Network
if netixlan_info.network.id != netixlan.network.id:
netixlan.network = netixlan_info.network
changed.append("network_id")
if save and (changed or netixlan.status == "deleted"):
netixlan.status = "ok"
netixlan.full_clean()
netixlan.save()
return result(netixlan)
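# Usage sketch (placeholder values): `info` is an unsaved NetworkIXLan
# acting purely as a data carrier, per the docstring above.
#
#   info = NetworkIXLan(network=net, asn=net.asn, ipaddr4="203.0.113.7",
#                       speed=10000, is_rs_peer=False, operational=True)
#   result = ixlan.add_netixlan(info)
#   result["created"], result["changed"], result["netixlan"]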
class IXLanIXFMemberImportAttempt(models.Model):
"""
Holds information about the most recent ixf member import
attempt for an ixlan.
"""
ixlan = models.OneToOneField(
IXLan,
on_delete=models.CASCADE,
primary_key=True,
related_name="ixf_import_attempt",
)
updated = models.DateTimeField(auto_now=True)
info = models.TextField(null=True, blank=True)
class IXLanIXFMemberImportLog(models.Model):
"""
Import log of a IX-F member import that changed or added at least one
netixlan under the specified ixlans.
"""
ixlan = models.ForeignKey(
IXLan, on_delete=models.CASCADE, related_name="ixf_import_log_set"
)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _("IX-F Import Log")
verbose_name_plural = _("IX-F Import Logs")
@reversion.create_revision()
@transaction.atomic()
def rollback(self):
"""
Attempt to rollback the changes described in this log.
"""
for entry in self.entries.all().order_by("-id"):
if entry.rollback_status() == 0:
if entry.version_before:
entry.version_before.revert()
related = self.entries.filter(
netixlan=entry.netixlan,
).exclude(id=entry.id)
for _entry in related.order_by("-id"):
try:
_entry.version_before.revert()
except Exception:
break
elif entry.netixlan.status == "ok":
entry.netixlan.ipaddr4 = None
entry.netixlan.ipaddr6 = None
entry.netixlan.delete()
class IXLanIXFMemberImportLogEntry(models.Model):
"""
IX-F member import log entry that holds the affected netixlan and
the netixlan's version after the change, which can be used to rollback
the change.
"""
log = models.ForeignKey(
IXLanIXFMemberImportLog, on_delete=models.CASCADE, related_name="entries"
)
netixlan = models.ForeignKey(
"peeringdb_server.NetworkIXLan",
on_delete=models.CASCADE,
related_name="ixf_import_log_entries",
)
version_before = models.ForeignKey(
reversion.models.Version,
on_delete=models.CASCADE,
null=True,
related_name="ixf_import_log_before",
)
version_after = models.ForeignKey(
reversion.models.Version,
on_delete=models.CASCADE,
related_name="ixf_import_log_after",
)
action = models.CharField(max_length=255, null=True, blank=True)
reason = models.CharField(max_length=255, null=True, blank=True)
class Meta:
verbose_name = _("IX-F Import Log Entry")
verbose_name_plural = _("IX-F Import Log Entries")
@property
def changes(self):
"""
Returns a dict of changes between the netixlan version
saved by the ix-f import and the version before.
Fields `created`, `updated` and `version` will be ignored.
"""
if not self.version_before:
return {}
data_before = self.version_before.field_dict
data_after = self.version_after.field_dict
rv = {}
for k, v in list(data_after.items()):
if k in ["created", "updated", "version"]:
continue
v2 = data_before.get(k)
if v != v2:
if isinstance(v, ipaddress.IPv4Address) or isinstance(
v, ipaddress.IPv6Address
):
rv[k] = str(v)
else:
rv[k] = v
return rv
def rollback_status(self):
recent_version = reversion.models.Version.objects.get_for_object(
self.netixlan
).first()
if self.version_after == recent_version:
if self.netixlan.status == "deleted":
conflict_v4, conflict_v6 = self.netixlan.ipaddress_conflict()
if conflict_v4 or conflict_v6:
return 2
return 0
elif self.version_before == recent_version:
return -1
return 1
class NetworkProtocolsDisabled(ValueError):
"""
Raised when a network has both ipv6 and ipv4 support
disabled during ix-f import.
"""
class IXFMemberData(pdb_models.NetworkIXLanBase):
"""
Describes a potential data update that arose during an ix-f import
attempt for a specific member (asn, ip4, ip6) to netixlan
(asn, ip4, ip6) where the importer could not complete the
update automatically.
"""
data = models.TextField(
null=False,
default="{}",
help_text=_("JSON snapshot of the ix-f member data that " "created this entry"),
)
log = models.TextField(blank=True, help_text=_("Activity for this entry"))
dismissed = models.BooleanField(
default=False,
help_text=_(
"Network's dismissal of this proposed change, which will hide it until"
" from the customer facing network view"
),
)
is_rs_peer = models.BooleanField(
default=None, null=True, blank=True, help_text=_("RS Peer")
)
error = models.TextField(
null=True,
blank=True,
help_text=_("Trying to apply data to peeringdb raised an issue"),
)
reason = models.CharField(max_length=255, default="")
fetched = models.DateTimeField(
_("Last Fetched"),
)
ixlan = models.ForeignKey(IXLan, related_name="ixf_set", on_delete=models.CASCADE)
requirement_of = models.ForeignKey(
"self",
on_delete=models.CASCADE,
related_name="requirement_set",
null=True,
blank=True,
help_text=_(
"Requirement of another IXFMemberData entry "
"and will be applied alongside it"
),
)
deskpro_ref = models.CharField(
max_length=32,
null=True,
blank=True,
help_text=_("Ticket reference on the DeskPRO side"),
)
deskpro_id = models.IntegerField(
null=True, blank=True, help_text=_("Ticket id on the DeskPRO side")
)
# field names of fields that can receive
# modifications from ix-f
data_fields = [
"speed",
"operational",
"is_rs_peer",
]
class Meta:
db_table = "peeringdb_ixf_member_data"
verbose_name = _("IX-F Member Data")
verbose_name_plural = _("IX-F Member Data")
class HandleRef:
tag = "ixfmember"
@classmethod
def id_filters(cls, asn, ipaddr4, ipaddr6, check_protocols=True):
"""
Returns a dict of filters to use with a
IXFMemberData or NetworkIXLan query set
to retrieve a unique entry.
"""
net = Network.objects.get(asn=asn)
ipv4_support = net.ipv4_support or not check_protocols
ipv6_support = net.ipv6_support or not check_protocols
filters = {"asn": asn}
if ipv4_support:
if ipaddr4:
filters["ipaddr4"] = ipaddr4
else:
filters["ipaddr4__isnull"] = True
if ipv6_support:
if ipaddr6:
filters["ipaddr6"] = ipaddr6
else:
filters["ipaddr6__isnull"] = True
return filters
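# Example outcome (sketch, assuming a network for the asn exists and
# supports both protocols):
#
#   IXFMemberData.id_filters(63311, "203.0.113.7", None)
#   # -> {"asn": 63311, "ipaddr4": "203.0.113.7", "ipaddr6__isnull": True}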
@classmethod
def instantiate(cls, asn, ipaddr4, ipaddr6, ixlan, **kwargs):
"""
Returns an IXFMemberData object.
It will take into consideration whether or not an instance
for this object already exists (as identified by asn and ip
addresses).
It will also update the value of `fetched` to now.
Keyword Argument(s):
- speed(int=0) : network speed (mbit)
- operational(bool=True): peer is operational
- is_rs_peer(bool=False): peer is route server
"""
fetched = datetime.datetime.now().replace(tzinfo=UTC())
net = Network.objects.get(asn=asn)
validate_network_protocols = kwargs.get("validate_network_protocols", True)
for_deletion = kwargs.get("delete", False)
try:
id_filters = cls.id_filters(asn, ipaddr4, ipaddr6)
instances = cls.objects.filter(**id_filters)
if not instances.exists():
raise cls.DoesNotExist()
if instances.count() > 1:
# this only happens when a network switches on/off
# ipv4/ipv6 protocol support between importer
# runs.
for instance in instances:
if ipaddr4 != instance.ipaddr4 or ipaddr6 != instance.ipaddr6:
instance.delete(hard=True)
instance = cls.objects.get(**id_filters)
else:
instance = instances.first()
for field in cls.data_fields:
setattr(instance, f"previous_{field}", getattr(instance, field))
instance._previous_data = instance.data
instance._previous_error = instance.error
instance.fetched = fetched
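# temporarily disable auto_now so this save only refreshes `fetched`
# without bumping the `updated` timestamp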
instance._meta.get_field("updated").auto_now = False
instance.save()
instance._meta.get_field("updated").auto_now = True
except cls.DoesNotExist:
ip_args = {}
if net.ipv4_support or not ipaddr4 or for_deletion:
ip_args.update(ipaddr4=ipaddr4)
if net.ipv6_support or not ipaddr6 or for_deletion:
ip_args.update(ipaddr6=ipaddr6)
if not ip_args and validate_network_protocols:
raise NetworkProtocolsDisabled(
_(
"No suitable ipaddresses when validating against the enabled network protocols"
)
)
instance = cls(asn=asn, status="ok", **ip_args)
instance.speed = kwargs.get("speed", 0)
instance.operational = kwargs.get("operational", True)
instance.is_rs_peer = kwargs.get("is_rs_peer")
instance.ixlan = ixlan
instance.fetched = fetched
instance.for_deletion = for_deletion
if ipaddr4:
instance.init_ipaddr4 = ipaddress.ip_address(ipaddr4)
else:
instance.init_ipaddr4 = None
if ipaddr6:
instance.init_ipaddr6 = ipaddress.ip_address(ipaddr6)
else:
instance.init_ipaddr6 = None
if "data" in kwargs:
instance.set_data(kwargs.get("data"))
return instance
@classmethod
def get_for_network(cls, net):
"""
Returns queryset for IXFMemberData objects that match
a network's asn.
Argument(s):
- net(Network)
"""
return cls.objects.filter(asn=net.asn)
@classmethod
def dismissed_for_network(cls, net):
"""
Returns queryset for IXFMemberData objects that match
a network's asn and are currently flagged as dismissed.
Argument(s):
- net(Network)
"""
qset = cls.get_for_network(net).select_related("ixlan", "ixlan__ix")
qset = qset.filter(dismissed=True)
return qset
@classmethod
def network_has_dismissed_actionable(cls, net):
"""
Returns whether or not the specified network has
any dismissed IXFMemberData suggestions that are
actionable.
Argument(s):
- net(Network)
"""
for ixf_member_data in cls.dismissed_for_network(net):
if ixf_member_data.action != "noop":
return True
return False
@classmethod
def proposals_for_network(cls, net):
"""
Returns a list of dicts describing actionable proposals for
a network, one entry per exchange, sorted by exchange name.
```
[
{
"ix": InternetExchange,
"add" : list(IXFMemberData),
"modify" : list(IXFMemberData),
"delete" : list(IXFMemberData),
},
...
]
```
Argument(s):
- net(Network)
"""
qset = cls.get_for_network(net).select_related("ixlan", "ixlan__ix")
proposals = {}
for ixf_member_data in qset:
action = ixf_member_data.action
ixf_member_data.error
# not actionable for anyone
if action == "noop":
continue
# not actionable for network
if not ixf_member_data.actionable_for_network:
continue
# dismissed by network
if ixf_member_data.dismissed:
continue
ix_id = ixf_member_data.ix.id
if ix_id not in proposals:
proposals[ix_id] = {
"ix": ixf_member_data.ix,
"add": [],
"delete": [],
"modify": [],
}
proposals[ix_id][action].append(ixf_member_data)
return sorted(proposals.values(), key=lambda x: x["ix"].name.lower())
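# Consumption sketch:
#
#   for proposal in IXFMemberData.proposals_for_network(net):
#       ix = proposal["ix"]
#       for entry in proposal["add"] + proposal["modify"] + proposal["delete"]:
#           ...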
@property
def previous_data(self):
return getattr(self, "_previous_data", "{}")
@property
def previous_error(self):
return getattr(self, "_previous_error", None)
@property
def json(self):
"""
Returns dict for self.data
"""
return json.loads(self.data)
@property
def net(self):
"""
Returns the Network instance related to
this entry.
"""
if not hasattr(self, "_net"):
self._net = Network.objects.get(asn=self.asn)
return self._net
@property
def actionable_for_network(self):
"""
Returns whether or not the proposed action by
this IXFMemberData instance is actionable by
the network.
"""
error = self.error
if error and "address outside of prefix" in error:
return False
if error and "does not match any prefix" in error:
return False
if error and "speed value" in error:
return False
return True
@property
def actionable_error(self):
"""
Returns whether or not the error is actionable
by exchange or network.
If actionable will return self.error otherwise
will return None.
"""
if not self.error:
return None
try:
error_data = json.loads(self.error)
except Exception:
return None
IPADDR_EXIST = "already exists"
DELETED_NETIXLAN_BAD_ASN = "This entity was created for the ASN"
if IPADDR_EXIST in error_data.get("ipaddr4", [""])[0]:
for requirement in self.requirements:
if requirement.netixlan.ipaddr4 == self.ipaddr4:
return None
if NetworkIXLan.objects.filter(
ipaddr4=self.ipaddr4, status="deleted"
).exists():
return None
if IPADDR_EXIST in error_data.get("ipaddr6", [""])[0]:
for requirement in self.requirements:
if requirement.netixlan.ipaddr6 == self.ipaddr6:
return None
if NetworkIXLan.objects.filter(
ipaddr6=self.ipaddr6, status="deleted"
).exists():
return None
if DELETED_NETIXLAN_BAD_ASN in error_data.get("__all__", [""])[0]:
if self.netixlan.status == "deleted":
return None
return self.error
@property
def net_contacts(self):
"""
Returns a list of email addresses that
are suitable contact points for conflict resolution
at the network's end.
"""
qset = self.net.poc_set_active.exclude(email="")
qset = qset.exclude(email__isnull=True)
role_priority = ["Technical", "NOC", "Policy"]
contacts = []
for role in role_priority:
for poc in qset.filter(role=role):
contacts.append(poc.email)
if contacts:
break
return list(set(contacts))
@property
def ix_contacts(self):
"""
Returns a list of email addresses that
are suitable contact points for conflict resolution
at the exchange end.
"""
return [self.ix.tech_email or self.ix.policy_email]
@property
def ix(self):
"""
Returns the InternetExchange instance related to
this entry.
"""
if not hasattr(self, "_ix"):
self._ix = self.ixlan.ix
return self._ix
@property
def ixf_id(self):
"""
Returns a tuple that identifies the ix-f member
as a unique record by asn, ip4 and ip6 address.
"""
return (self.asn, self.ipaddr4, self.ipaddr6)
@property
def ixf_id_pretty_str(self):
ipaddr4 = self.ipaddr4 or _("IPv4 not set")
ipaddr6 = self.ipaddr6 or _("IPv6 not set")
return f"AS{self.asn} - {ipaddr4} - {ipaddr6}"
@property
def actionable_changes(self):
self.requirements
_changes = self.changes
for requirement in self.requirements:
_changes.update(self._changes(requirement.netixlan))
if self.ipaddr4_on_requirement:
_changes.update(ipaddr4=self.ipaddr4_on_requirement)
if self.ipaddr6_on_requirement:
_changes.update(ipaddr6=self.ipaddr6_on_requirement)
return _changes
@property
def changes(self):
"""
Returns a dict of changes (field, value)
between this entry and the related netixlan.
If an empty dict is returned that means no changes.
```
{
<field_name> : {
"from" : <value>,
"to : <value>
}
}
```
"""
netixlan = self.netixlan
return self._changes(netixlan)
def _changes(self, netixlan):
changes = {}
if self.marked_for_removal or not netixlan:
return changes
if (
self.modify_is_rs_peer
and self.is_rs_peer is not None
and netixlan.is_rs_peer != self.is_rs_peer
):
changes.update(
is_rs_peer={"from": netixlan.is_rs_peer, "to": self.is_rs_peer}
)
if self.modify_speed and self.speed > 0 and netixlan.speed != self.speed:
changes.update(speed={"from": netixlan.speed, "to": self.speed})
if netixlan.operational != self.operational:
changes.update(
operational={"from": netixlan.operational, "to": self.operational}
)
if netixlan.status != self.status:
changes.update(status={"from": netixlan.status, "to": self.status})
return changes
@property
def modify_speed(self):
"""
Returns whether or not the `speed` property
is enabled to receive modify updates or not (#793).
"""
return False
@property
def modify_is_rs_peer(self):
"""
Returns whether or not the `is_rs_peer` property
is enabled to receive modify updates or not (#793).
"""
return False
@property
def changed_fields(self):
"""
Returns a comma separated string of field names
for changes proposed by this IXFMemberData instance.
"""
return ", ".join(list(self.changes.keys()))
@property
def remote_changes(self):
"""
Returns a dict of changed fields between previously
fetched IX-F data and current IX-F data.
If an empty dict is returned that means no changes.
```
{
<field_name> : {
"from" : <value>,
"to : <value>
}
}
```
"""
if not self.id and self.netixlan.id:
return {}
changes = {}
for field in self.data_fields:
old_v = getattr(self, f"previous_{field}", None)
v = getattr(self, field)
if old_v is not None and v != old_v:
changes[field] = {"from": old_v, "to": v}
return changes
@property
def remote_data_missing(self):
"""
Returns whether or not remote data is missing for this
IXFMemberData entry.
If True, the member no longer exists at the
ix-f source.
"""
return self.data == "{}" or not self.data
@property
def marked_for_removal(self):
"""
Returns whether or not this entry implies that
the related netixlan should be removed.
We do this by checking if the ix-f data was provided
or not.
"""
if not self.netixlan.id or self.netixlan.status == "deleted":
# edge-case that should not really happen
# non-existing netixlan cannot be removed
return False
return self.remote_data_missing
@property
def net_present_at_ix(self):
"""
Returns whether or not the network associated with
this IXFMemberData instance currently has a presence
at the exchange associated with this IXFMemberData
instance.
"""
return NetworkIXLan.objects.filter(
ixlan=self.ixlan, network=self.net, status="ok"
).exists()
@property
def action(self):
"""
Returns the implied action of applying this
entry to peeringdb.
Will return either "add", "modify", "delete" or "noop"
"""
has_data = self.remote_data_missing is False
action = "noop"
if has_data:
if not self.netixlan.id:
action = "add"
elif self.status == "ok" and self.netixlan.status == "deleted":
action = "add"
elif self.changes:
action = "modify"
else:
if self.marked_for_removal:
action = "delete"
# the proposal is to add a netixlan, but we have
# the requirement of a deletion of another netixlan
# that has one of the ips set but not the other.
#
# action re-classified to modify (#770)
if action == "add" and self.has_requirements:
if (
self.primary_requirement.asn == self.asn
and self.primary_requirement.action == "delete"
):
action = "modify"
return action
@property
def has_requirements(self):
"""
Return whether or not this IXFMemberData has
other IXFMemberData objects as requirements.
"""
return len(self.requirements) > 0
@property
def requirements(self):
"""
Returns list of all IXFMemberData objects
that are still active requirements for this
IXFMemberData object.
"""
return [
requirement
for requirement in self.requirement_set.all()
# if requirement.action != "noop"
]
@property
def primary_requirement(self):
"""
Return the initial requirement IXFMemberData
for this IXFMemberData instance, None if there
isn't any.
"""
try:
return self.requirements[0]
except IndexError:
return None
@property
def secondary_requirements(self):
"""
Return a list of secondary requirement IXFMemberData
objects for this IXFMemberData object. Currently this
only happens on add proposals that require two netixlans
to be deleted because both ipaddresses exist on separate
netixlans (#770).
"""
return self.requirements[1:]
@property
def ipaddr4_on_requirement(self):
"""
Returns true if the ipv4 address claimed by this IXFMemberData
object exists on one of its requirement IXFMemberData objects.
"""
ipaddr4 = self.ipaddr4
if not ipaddr4 and hasattr(self, "init_ipaddr4"):
ipaddr4 = self.init_ipaddr4
if not ipaddr4:
return False
for requirement in self.requirements:
if requirement.ipaddr4 == ipaddr4:
return True
return False
@property
def ipaddr6_on_requirement(self):
"""
Returns true if the ipv6 address claimed by this IXFMemberData
object exists on one of its requirement IXFMemberData objects.
"""
ipaddr6 = self.ipaddr6
if not ipaddr6 and hasattr(self, "init_ipaddr6"):
ipaddr6 = self.init_ipaddr6
if not ipaddr6:
return False
for requirement in self.requirements:
if requirement.ipaddr6 == ipaddr6:
return True
return False
@property
def netixlan(self):
"""
Will either return a matching existing netixlan
instance (asn,ip4,ip6) or a new netixlan if
a matching netixlan does not currently exist.
Any new netixlan will NOT be saved at this point.
Note that the netixlan that matched may be currently
soft-deleted (status=="deleted").
"""
if not hasattr(self, "_netixlan"):
if not hasattr(self, "for_deletion"):
self.for_deletion = self.remote_data_missing
try:
if self.for_deletion:
filters = self.id_filters(
self.asn, self.ipaddr4, self.ipaddr6, check_protocols=False
)
else:
filters = self.id_filters(self.asn, self.ipaddr4, self.ipaddr6)
if "ipaddr6" not in filters and "ipaddr4" not in filters:
raise NetworkIXLan.DoesNotExist()
self._netixlan = NetworkIXLan.objects.get(**filters)
except NetworkIXLan.DoesNotExist:
is_rs_peer = self.is_rs_peer
if is_rs_peer is None:
is_rs_peer = False
self._netixlan = NetworkIXLan(
ipaddr4=self.ipaddr4,
ipaddr6=self.ipaddr6,
speed=self.speed,
asn=self.asn,
operational=self.operational,
is_rs_peer=is_rs_peer,
ixlan=self.ixlan,
network=self.net,
status="ok",
)
return self._netixlan
@property
def netixlan_exists(self):
"""
Returns whether or not an active netixlan exists
for this IXFMemberData instance.
"""
return self.netixlan.id and self.netixlan.status != "deleted"
def __str__(self):
parts = [
self.ixlan.ix.name,
f"AS{self.asn}",
]
if self.ipaddr4:
parts.append(f"{self.ipaddr4}")
else:
parts.append("No IPv4")
if self.ipaddr6:
parts.append(f"{self.ipaddr6}")
else:
parts.append("No IPv6")
return " ".join(parts)
def set_requirement(self, ixf_member_data, save=True):
"""
Sets another IXFMemberData object to be a requirement
of the resolution of this IXFMemberData object.
"""
if not ixf_member_data:
return
if ixf_member_data in self.requirements:
return
if ixf_member_data.netixlan == self.netixlan:
return
ixf_member_data.requirement_of = self
if save:
ixf_member_data.save()
return ixf_member_data
def apply_requirements(self, save=True):
"""
Apply all requirements.
"""
for requirement in self.requirements:
requirement.apply(save=save)
def apply(self, user=None, comment=None, save=True):
"""
Applies the data.
This will either create, update or delete a netixlan
object.
Will return a dict containing action and netixlan
affected.
```
{
"action": <action(str)>
"netixlan": <NetworkIXLan>
}
```
Keyword Argument(s):
- user(User): if set will set the user on the
reversion revision
- comment(str): if set will set the comment on the
reversion revision
- save(bool=True): only persist changes to the database
if this is True
"""
if user and user.is_authenticated:
reversion.set_user(user)
if comment:
reversion.set_comment(comment)
self.apply_requirements(save=save)
action = self.action
netixlan = self.netixlan
self.changes
if action == "add":
self.validate_speed()
# Update data values
netixlan.speed = self.speed
netixlan.is_rs_peer = bool(self.is_rs_peer)
netixlan.operational = bool(self.operational)
if not self.net.ipv6_support:
netixlan.ipaddr6 = None
if not self.net.ipv4_support:
netixlan.ipaddr4 = None
result = self.ixlan.add_netixlan(netixlan, save=save, save_others=save)
self._netixlan = netixlan = result["netixlan"]
elif action == "modify":
self.validate_speed()
if self.modify_speed and self.speed:
netixlan.speed = self.speed
if self.modify_is_rs_peer and self.is_rs_peer is not None:
netixlan.is_rs_peer = self.is_rs_peer
netixlan.operational = self.operational
if save:
netixlan.full_clean()
netixlan.save()
elif action == "delete":
if save:
netixlan.delete()
return {"action": action, "netixlan": netixlan, "ixf_member_data": self}
def validate_speed(self):
"""
Speed errors in ix-f data are raised during parse
and speed will be on the attribute.
In order to properly handle invalid speed values,
check if speed is 0 and if there was a parsing
error for it, and if so raise a validation error.
TODO: find a better way to do this.
"""
if self.speed == 0 and self.error:
error_data = json.loads(self.error)
if "speed" in self.error:
raise ValidationError(error_data)
def save_without_update(self):
self._meta.get_field("updated").auto_now = False
self.save()
self._meta.get_field("updated").auto_now = True
def grab_validation_errors(self):
"""
This will attempt to validate the netixlan associated
with this IXFMemberData instance.
Any validation errors will be stored to self.error
"""
try:
self.netixlan.full_clean()
except ValidationError as exc:
self.error = json.dumps(exc, cls=ValidationErrorEncoder)
def set_resolved(self, save=True):
"""
Marks this IXFMemberData instance as resolved and
sends out notifications to ac,ix and net if
warranted.
This will delete the IXFMemberData instance.
"""
if self.id and save and not self.requirement_of_id:
self.delete(hard=True)
return True
def set_conflict(self, error=None, save=True):
"""
Persist this IXFMemberData instance and send out notifications
for conflict (validation issues) for modifications proposed
to the corresponding netixlan to ac, ix and net as warranted.
"""
if not self.id:
existing_conflict = IXFMemberData.objects.filter(
asn=self.asn, error__isnull=False
)
if self.ipaddr4 and self.ipaddr6:
existing_conflict = existing_conflict.filter(
models.Q(ipaddr4=self.ipaddr4) | models.Q(ipaddr6=self.ipaddr6)
)
elif self.ipaddr4:
existing_conflict = existing_conflict.filter(ipaddr4=self.ipaddr4)
elif self.ipaddr6:
existing_conflict = existing_conflict.filter(ipaddr6=self.ipaddr6)
if existing_conflict.exists():
return None
if (self.remote_changes or (error and not self.previous_error)) and save:
if error:
self.error = json.dumps(error, cls=ValidationErrorEncoder)
else:
self.error = None
self.dismissed = False
self.save()
return True
elif self.previous_data != self.data and save:
# since remote_changes only tracks changes to the
# relevant data fields speed, operational and is_rs_peer
# we check if the remote data has changed in general
# and force a save if it did
self.save_without_update()
def set_update(self, save=True, reason=""):
"""
Persist this IXFMemberData instance and send out notifications
for proposed modification to the corresponding netixlan
instance to ac, ix and net as warranted.
"""
self.reason = reason
if ((self.changes and not self.id) or self.remote_changes) and save:
self.grab_validation_errors()
self.dismissed = False
self.save()
return True
elif self.previous_data != self.data and save:
# since remote_changes only tracks changes to the
# relevant data fields speed, operational and is_rs_peer
# we check if the remote data has changed in general
# and force a save if it did
self.save_without_update()
def set_add(self, save=True, reason=""):
"""
Persist this IXFMemberData instance and send out notifications
for proposed creation of netixlan instance to ac, ix and net
as warranted.
"""
self.reason = reason
if not self.id and save:
self.grab_validation_errors()
self.save()
return True
elif self.previous_data != self.data and save:
# since remote_changes only tracks changes to the
# relevant data fields speed, operational and is_rs_peer
# we check if the remote data has changed in general
# and force a save if it did
self.save_without_update()
def set_remove(self, save=True, reason=""):
"""
Persist this IXFMemberData instance and send out notifications
for proposed removal of netixlan instance to ac, net and ix
as warranted.
"""
self.reason = reason
# we persist this ix-f member data that proposes removal
# if any of these conditions are met
# marked for removal, but not saved
not_saved = not self.id and self.marked_for_removal
# was in remote-data last time, gone now
gone = (
self.id
and getattr(self, "previous_data", "{}") != "{}"
and self.remote_data_missing
)
if (not_saved or gone) and save:
self.set_data({})
self.save()
return True
def set_data(self, data):
"""
Stores a dict in self.data as a json string.
"""
self.data = json.dumps(data)
def render_notification(self, template_file, recipient, context=None):
"""
Renders notification text for this ixfmemberdata
instance.
Argument(s):
- template_file(str): email template file
- recipient(str): ac, ix or net
- context(dict): if set will update the template context
from this
"""
_context = {
"instance": self,
"recipient": recipient,
"ixf_url": self.ixlan.ixf_ixp_member_list_url,
"ixf_url_public": (self.ixlan.ixf_ixp_member_list_url_visible == "Public"),
}
if context:
_context.update(context)
template = loader.get_template(template_file)
return template.render(_context)
@property
def ac_netixlan_url(self):
if not self.netixlan.id:
return ""
path = django.urls.reverse(
"admin:peeringdb_server_networkixlan_change",
args=(self.netixlan.id,),
)
return f"{settings.BASE_URL}{path}"
@property
def ac_url(self):
if not self.id:
return ""
path = django.urls.reverse(
"admin:peeringdb_server_ixfmemberdata_change",
args=(self.id,),
)
return f"{settings.BASE_URL}{path}"
# read only, or can make bigger, making smaller could break links
# validate could check
@grainy_model(
namespace="prefix",
namespace_instance="{instance.ixlan.grainy_namespace}.{namespace}.{instance.pk}",
)
@reversion.register
class IXLanPrefix(ProtectedMixin, pdb_models.IXLanPrefixBase):
"""
Describes a prefix at an exchange LAN.
"""
ixlan = models.ForeignKey(
IXLan, on_delete=models.CASCADE, default=0, related_name="ixpfx_set"
)
# override in_dfz to default it to True on the schema level (#761)
in_dfz = models.BooleanField(default=True)
@property
def descriptive_name(self):
"""
Returns a descriptive label of the ixpfx for logging purposes.
"""
return f"ixpfx{self.id} {self.prefix}"
@classmethod
def related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Filter queryset of ixpfx objects related to exchange via ix_id match
according to filter.
Relationship through ixlan -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter("ixlan__%s" % field, filt, value)
return qset.filter(**filt)
@classmethod
def whereis_ip(cls, ipaddr, qset=None):
"""
Filter queryset of ixpfx objects where the prefix contains
the supplied ipaddress.
"""
if not qset:
qset = cls.handleref.undeleted()
ids = []
ipaddr = ipaddress.ip_address(ipaddr)
for ixpfx in qset:
if ipaddr in ixpfx.prefix:
ids.append(ixpfx.id)
return qset.filter(id__in=ids)
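# Sketch: find active prefixes containing an address; strings and
# ipaddress objects both work, per ip_address() above.
#
#   IXLanPrefix.whereis_ip("2001:db8::1")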
def __str__(self):
return f"{self.prefix}"
def test_ip_address(self, addr):
"""
Checks if this prefix can contain the specified ip address.
Arguments:
- addr (ipaddress.IPv4Address or ipaddress.IPv6Address or unicode): ip address
to check, can be either ipv4 or ipv6, but should be pre-validated to be in the
correct format as this function will simply return False in case of format
validation errors.
Returns:
- bool: True if prefix can contain the specified address
- bool: False if prefix cannot contain the specified address
"""
try:
if not addr:
return False
if isinstance(addr, str):
addr = ipaddress.ip_address(addr)
return addr in ipaddress.ip_network(self.prefix)
except ipaddress.AddressValueError:
return False
except ValueError:
return False
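# Sketch:
#
#   pfx.test_ip_address("203.0.113.9")   # True if inside self.prefix
#   pfx.test_ip_address("bad input")     # False, ValueError is swallowed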
@property
def deletable(self):
"""
Returns whether or not the prefix is currently
in a state where it can be marked as deleted.
This will be False for prefixes of which ANY
of the following is True:
- parent ixlan has netixlans that fall into
its address space
"""
prefix = self.prefix
can_delete = True
for netixlan in self.ixlan.netixlan_set_active:
if self.protocol == "IPv4":
if netixlan.ipaddr4 and netixlan.ipaddr4 in prefix:
can_delete = False
break
if self.protocol == "IPv6":
if netixlan.ipaddr6 and netixlan.ipaddr6 in prefix:
can_delete = False
break
if not can_delete:
self._not_deletable_reason = _(
"There are active peers at this exchange that fall into "
"this address space"
)
else:
self._not_deletable_reason = None
return can_delete
def clean(self):
"""
Custom model validation.
"""
status_error = _(
"IXLanPrefix with status '{}' cannot be linked to a IXLan with status '{}'."
).format(self.status, self.ixlan.status)
if self.ixlan.status == "pending" and self.status == "ok":
raise ValidationError(status_error)
elif self.ixlan.status == "deleted" and self.status in ["ok", "pending"]:
raise ValidationError(status_error)
# validate the specified prefix address
validate_address_space(self.prefix)
validate_prefix_overlap(self.prefix)
return super().clean()
@grainy_model(namespace="network", parent="org")
@reversion.register
class Network(pdb_models.NetworkBase):
"""
Describes a peeringdb network.
"""
org = models.ForeignKey(
Organization, on_delete=models.CASCADE, related_name="net_set"
)
allow_ixp_update = models.BooleanField(
null=False,
default=False,
help_text=_(
"Specifies whether an IXP is allowed to add a netixlan entry for this network via their ixp_member data"
),
)
netixlan_updated = models.DateTimeField(blank=True, null=True)
netfac_updated = models.DateTimeField(blank=True, null=True)
poc_updated = models.DateTimeField(blank=True, null=True)
ix_count = models.PositiveIntegerField(
_("number of exchanges at this network"),
help_text=_("number of exchanges at this network"),
null=False,
default=0,
)
fac_count = models.PositiveIntegerField(
_("number of facilities at this network"),
help_text=_("number of facilities at this network"),
null=False,
default=0,
)
@staticmethod
def autocomplete_search_fields():
return (
"id__iexact",
"name__icontains",
)
def __unicode__(self):
return self.name
@classmethod
@reversion.create_revision()
@transaction.atomic()
def create_from_rdap(cls, rdap, asn, org):
"""
Creates network from rdap result object.
"""
name = rdap.name
if not rdap.name:
name = "AS%d" % (asn)
if cls.objects.filter(name=name).exists():
net = cls.objects.create(org=org, asn=asn, name="%s !" % name, status="ok")
else:
net = cls.objects.create(org=org, asn=asn, name=name, status="ok")
return net, True
@classmethod
def related_to_fac(cls, value=None, filt=None, field="facility_id", qset=None):
"""
Filter queryset of Network objects related to the facility
specified by fac_id
Relationship through netfac -> fac
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkFacility.handleref.filter(**filt)
return qset.filter(id__in=[i.network_id for i in q])
@classmethod
def not_related_to_fac(cls, value=None, filt=None, field="facility_id", qset=None):
"""
Filter queryset of Network objects NOT related to the facility
specified by fac_id (as in networks NOT present at the facility)
Relationship through netfac -> fac
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkFacility.handleref.filter(**filt)
return qset.exclude(id__in=[i.network_id for i in q])
@classmethod
def related_to_netfac(cls, value=None, filt=None, field="id", qset=None):
"""
Filter queryset of Network objects related to the netfac link
specified by netfac_id
Relationship through netfac
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value, prefix="netfac")
q = NetworkFacility.handleref.filter(**filt)
return qset.filter(id__in=[i.network_id for i in q])
@classmethod
def related_to_netixlan(cls, value=None, filt=None, field="id", qset=None):
"""
Filter queryset of Network objects related to the netixlan link
specified by netixlan_id
Relationship through netixlan.
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value, prefix="netixlan")
q = NetworkIXLan.handleref.filter(**filt)
return qset.filter(id__in=[i.network_id for i in q])
@classmethod
def related_to_ixlan(cls, value=None, filt=None, field="ixlan_id", qset=None):
"""
Filter queryset of Network objects related to the ixlan
specified by ixlan_id
Relationship through netixlan -> ixlan
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = NetworkIXLan.handleref.filter(**filt)
return qset.filter(id__in=[i.network_id for i in q])
@classmethod
def related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Filter queryset of Network objects related to the ix
specified by ix_id
Relationship through netixlan -> ixlan -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter("ixlan__%s" % field, filt, value)
q = NetworkIXLan.handleref.select_related("ixlan").filter(**filt)
return qset.filter(id__in=[i.network_id for i in q])
@classmethod
def not_related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Filter queryset of Network objects not related to the ix
specified by ix_id (as in networks not present at the exchange).
Relationship through netixlan -> ixlan -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter("ixlan__%s" % field, filt, value)
q = NetworkIXLan.handleref.select_related("ixlan").filter(**filt)
return qset.exclude(id__in=[i.network_id for i in q])
@classmethod
def as_set_map(cls, qset=None):
"""
Returns a dict mapping asns to their irr_as_set value.
"""
if not qset:
qset = cls.objects.filter(status="ok").order_by("asn")
return {net.asn: net.irr_as_set for net in qset}
@property
def search_result_name(self):
"""
This will be the name displayed for quick search matches
of this entity.
"""
return f"{self.name} ({self.asn})"
@property
def netfac_set_active(self):
return self.netfac_set(manager="handleref").filter(status="ok")
@property
def netixlan_set_active(self):
return self.netixlan_set(manager="handleref").filter(status="ok")
@property
def ixlan_set_active(self):
"""
Returns IXLan queryset for ixlans connected to this network
through NetworkIXLan.
"""
ixlan_ids = []
for netixlan in self.netixlan_set_active:
if netixlan.ixlan_id not in ixlan_ids:
ixlan_ids.append(netixlan.ixlan_id)
return IXLan.objects.filter(id__in=ixlan_ids)
@property
def ixlan_set_ixf_enabled(self):
"""
Returns IXLan queryset for IX-F import enabled ixlans connected
to this network through NetworkIXLan.
"""
qset = self.ixlan_set_active.filter(ixf_ixp_import_enabled=True)
qset = qset.exclude(ixf_ixp_member_list_url__isnull=True)
return qset
@property
def poc_set_active(self):
return self.poc_set(manager="handleref").filter(status="ok")
@property
def ipv4_support(self):
# network has not indicated either ip4 or ip6 support
# so assume True (#771)
if not self.info_unicast and not self.info_ipv6:
return True
return self.info_unicast
@property
def ipv6_support(self):
# network has not indicated either ip4 or ip6 support
# so assume True (#771)
if not self.info_unicast and not self.info_ipv6:
return True
return self.info_ipv6
@property
def sponsorship(self):
return self.org.sponsorship
@property
def view_url(self):
"""
        Return the URL to this network's web view.
"""
return "{}{}".format(
settings.BASE_URL, django.urls.reverse("net-view", args=(self.id,))
)
@property
def view_url_asn(self):
"""
        Return the URL to this network's web view.
"""
return "{}{}".format(
settings.BASE_URL, django.urls.reverse("net-view-asn", args=(self.asn,))
)
def clean(self):
"""
Custom model validation.
"""
try:
validate_info_prefixes4(self.info_prefixes4)
except ValidationError as exc:
raise ValidationError({"info_prefixes4": exc})
try:
validate_info_prefixes6(self.info_prefixes6)
except ValidationError as exc:
raise ValidationError({"info_prefixes6": exc})
try:
if self.irr_as_set:
self.irr_as_set = validate_irr_as_set(self.irr_as_set)
except ValidationError as exc:
raise ValidationError({"irr_as_set": exc})
return super().clean()
# class NetworkContact(HandleRefModel):
@grainy_model(
namespace="poc_set",
namespace_instance="{namespace}.{instance.visible}",
parent="network",
)
@reversion.register
class NetworkContact(ProtectedMixin, pdb_models.ContactBase):
"""
Describes a contact point (phone, email etc.) for a network.
"""
# id = models.AutoField(primary_key=True)
network = models.ForeignKey(
Network, on_delete=models.CASCADE, default=0, related_name="poc_set"
)
TECH_ROLES = ["Technical", "NOC", "Policy"]
class Meta:
db_table = "peeringdb_network_contact"
@property
def is_tech_contact(self):
return self.role in self.TECH_ROLES
@property
def deletable(self):
"""
Returns whether or not the poc is currently
in a state where it can be marked as deleted.
This will be False for pocs that are the last remaining
technical contact point for a network that has
active netixlans. (#923)
"""
# non-technical pocs can always be deleted
if not self.is_tech_contact:
self._not_deletable_reason = None
return True
netixlan_count = self.network.netixlan_set_active.count()
tech_poc_count = self.network.poc_set_active.filter(
role__in=self.TECH_ROLES
).count()
if netixlan_count and tech_poc_count == 1:
# there are active netixlans and this poc is the
# only technical poc left
self._not_deletable_reason = _(
"Last technical contact point for network with active peers"
)
return False
else:
self._not_deletable_reason = None
return True
def clean(self):
self.phone = validate_phonenumber(self.phone)
self.visible = validate_poc_visible(self.visible)
@grainy_model(namespace="netfac", parent="network")
@reversion.register
class NetworkFacility(pdb_models.NetworkFacilityBase):
"""
Describes a network <-> facility relationship.
"""
network = models.ForeignKey(
Network, on_delete=models.CASCADE, default=0, related_name="netfac_set"
)
facility = models.ForeignKey(
Facility, on_delete=models.CASCADE, default=0, related_name="netfac_set"
)
class Meta:
db_table = "peeringdb_network_facility"
unique_together = ("network", "facility", "local_asn")
@classmethod
def related_to_name(cls, value=None, filt=None, field="facility__name", qset=None):
"""
Filter queryset of netfac objects related to facilities with name match
in facility__name according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(**make_relation_filter(field, filt, value))
@classmethod
def related_to_country(
cls, value=None, filt=None, field="facility__country", qset=None
):
"""
Filter queryset of netfac objects related to country via match
in facility__country according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.filter(status="ok")
return qset.filter(**make_relation_filter(field, filt, value))
@classmethod
def related_to_city(cls, value=None, filt=None, field="facility__city", qset=None):
"""
Filter queryset of netfac objects related to city via match
in facility__city according to filter.
Relationship through facility.
"""
if not qset:
qset = cls.handleref.undeleted()
return qset.filter(**make_relation_filter(field, filt, value))
@property
def descriptive_name(self):
"""
Returns a descriptive label of the netfac for logging purposes.
"""
return "netfac{} AS{} {} <-> {}".format(
self.id, self.network.asn, self.network.name, self.facility.name
)
def clean(self):
# when validating an existing netfac that has a mismatching
        # local_asn value, raise a validation error stating that it needs
# to be moved
#
# this is to catch and force correction of instances where they
# could not be migrated automatically during rollout of #168
# because the targeted local_asn did not exist in peeringdb
if self.id and self.local_asn != self.network.asn:
raise ValidationError(
_(
"This entity was created for the ASN {} - please remove it from this network and recreate it under the correct network"
).format(self.local_asn)
)
# `local_asn` will eventually be dropped from the schema
# for now make sure it is always a match to the related
# network (#168)
self.local_asn = self.network.asn
def format_speed(value):
if value >= 1000000:
value = value / 10 ** 6
if not value % 1:
return f"{value:.0f}T"
return f"{value:.1f}T"
elif value >= 1000:
return f"{value / 10 ** 3:.0f}G"
else:
return f"{value:.0f}M"
@grainy_model(namespace="ixlan", parent="network")
@reversion.register
class NetworkIXLan(pdb_models.NetworkIXLanBase):
"""
Describes a network relationship to an IX through an IX Lan.
"""
network = models.ForeignKey(
Network, on_delete=models.CASCADE, default=0, related_name="netixlan_set"
)
ixlan = models.ForeignKey(
IXLan, on_delete=models.CASCADE, default=0, related_name="netixlan_set"
)
class Meta:
db_table = "peeringdb_network_ixlan"
constraints = [
models.UniqueConstraint(fields=["ipaddr4"], name="unique_ipaddr4"),
models.UniqueConstraint(fields=["ipaddr6"], name="unique_ipaddr6"),
]
@property
def name(self):
return ""
@property
def descriptive_name(self):
"""
Returns a descriptive label of the netixlan for logging purposes.
"""
return "netixlan{} AS{} {} {}".format(
self.id, self.asn, self.ipaddr4, self.ipaddr6
)
@property
def ix_name(self):
"""
Returns the exchange name for this netixlan.
"""
return self.ixlan.ix.name
@property
def ix_id(self):
"""
Returns the exchange id for this netixlan.
"""
return self.ixlan.ix_id
@property
def ixf_id(self):
"""
Returns a tuple that identifies the netixlan
        in the context of an IX-F member data entry as a unique record
        by ASN, IPv4 and IPv6 address.
"""
        # bare attribute access; appears to exist only to force-load the related
        # network before building the tuple
        self.network
return (self.asn, self.ipaddr4, self.ipaddr6)
@property
def ixf_id_pretty_str(self):
asn, ipaddr4, ipaddr6 = self.ixf_id
ipaddr4 = ipaddr4 or _("IPv4 not set")
ipaddr6 = ipaddr6 or _("IPv6 not set")
return f"AS{asn} - {ipaddr4} - {ipaddr6}"
@classmethod
def related_to_ix(cls, value=None, filt=None, field="ix_id", qset=None):
"""
Filter queryset of netixlan objects related to the ix
specified by ix_id
Relationship through ixlan -> ix
"""
if not qset:
qset = cls.handleref.undeleted()
filt = make_relation_filter(field, filt, value)
q = IXLan.handleref.select_related("ix").filter(**filt)
return qset.filter(ixlan_id__in=[i.id for i in q])
@classmethod
def related_to_name(cls, value=None, filt=None, field="ix__name", qset=None):
"""
Filter queryset of netixlan objects related to exchange via a name match
according to filter.
Relationship through ixlan -> ix
"""
return cls.related_to_ix(value=value, filt=filt, field=field, qset=qset)
def ipaddress_conflict(self):
"""
Checks whether the ip addresses specified on this netixlan
exist on another netixlan (with status="ok").
Returns:
- tuple(bool, bool): tuple of two booleans, first boolean is
true if there was a conflict with the ip4 address, second
boolean is true if there was a conflict with the ip6
address
"""
ipv4 = NetworkIXLan.objects.filter(ipaddr4=self.ipaddr4, status="ok").exclude(
id=self.id
)
ipv6 = NetworkIXLan.objects.filter(ipaddr6=self.ipaddr6, status="ok").exclude(
id=self.id
)
conflict_v4 = self.ipaddr4 and ipv4.exists()
conflict_v6 = self.ipaddr6 and ipv6.exists()
return (conflict_v4, conflict_v6)
def validate_ipaddr4(self):
if self.ipaddr4 and not self.ixlan.test_ipv4_address(self.ipaddr4):
raise ValidationError(_("IPv4 address outside of prefix"))
def validate_ipaddr6(self):
if self.ipaddr6 and not self.ixlan.test_ipv6_address(self.ipaddr6):
raise ValidationError(_("IPv6 address outside of prefix"))
def validate_speed(self):
if self.speed in [None, 0]:
pass
# bypass validation according to #741
elif bypass_validation():
return
elif self.speed > settings.DATA_QUALITY_MAX_SPEED:
raise ValidationError(
_("Maximum speed: {}").format(
format_speed(settings.DATA_QUALITY_MAX_SPEED)
)
)
elif self.speed < settings.DATA_QUALITY_MIN_SPEED:
raise ValidationError(
_("Minimum speed: {}").format(
format_speed(settings.DATA_QUALITY_MIN_SPEED)
)
)
def clean(self):
"""
Custom model validation.
"""
errors = {}
        # check that the ip address can be validated against
        # at least one of the prefixes on the parent ixlan
try:
self.validate_ipaddr4()
except ValidationError as exc:
errors["ipaddr4"] = exc.message
try:
self.validate_ipaddr6()
except ValidationError as exc:
errors["ipaddr6"] = exc.message
try:
self.validate_speed()
except ValidationError as exc:
errors["speed"] = exc.message
if errors:
raise ValidationError(errors)
# make sure this ip address is not claimed anywhere else
conflict_v4, conflict_v6 = self.ipaddress_conflict()
if conflict_v4:
errors["ipaddr4"] = _("Ip address already exists elsewhere")
if conflict_v6:
errors["ipaddr6"] = _("Ip address already exists elsewhere")
if errors:
raise ValidationError(errors)
# when validating an existing netixlan that has a mismatching
        # asn value, raise a validation error stating that it needs
# to be moved
#
# this is to catch and force correction of instances where they
# could not be migrated automatically during rollout of #168
# because the targeted asn did not exist in peeringdb
if self.id and self.asn != self.network.asn:
raise ValidationError(
_(
"This entity was created for the ASN {} - please remove it from this network and recreate it under the correct network"
).format(self.asn)
)
# `asn` will eventually be dropped from the schema
# for now make sure it is always a match to the related
# network (#168)
self.asn = self.network.asn
def ipaddr(self, version):
"""
Return the netixlan's ipaddr for ip version.
"""
if version == 4:
return self.ipaddr4
elif version == 6:
return self.ipaddr6
raise ValueError(f"Invalid ip version {version}")
def descriptive_name_ipv(self, version):
"""
Returns a descriptive label of the netixlan for logging purposes.
Will only contain the ipaddress matching the specified version.
"""
return f"netixlan{self.id} AS{self.asn} {self.ipaddr(version)}"
class User(AbstractBaseUser, PermissionsMixin):
"""
    User model with proper-length fields (e.g., 254-character username and email).
"""
username = models.CharField(
_("username"),
max_length=254,
unique=True,
help_text=_("Required. Letters, digits and [@.+-/_=|] only."),
validators=[
validators.RegexValidator(
r"^[\w\.@+-=|/]+$",
_("Enter a valid username."),
"invalid",
flags=re.UNICODE,
)
],
)
email = models.EmailField(_("email address"), max_length=254)
first_name = models.CharField(_("first name"), max_length=254, blank=True)
last_name = models.CharField(_("last name"), max_length=254, blank=True)
is_staff = models.BooleanField(
_("staff status"),
default=False,
help_text=_("Designates whether the user can log into admin site."),
)
is_active = models.BooleanField(
_("active"),
default=True,
help_text=_(
"Designates whether this user should be treated as active. Unselect this instead of deleting accounts."
),
)
date_joined = models.DateTimeField(_("date joined"), default=timezone.now)
created = CreatedDateTimeField()
updated = UpdatedDateTimeField()
status = models.CharField(_("status"), max_length=254, default="ok")
locale = models.CharField(_("language"), max_length=62, blank=True, null=True)
objects = UserManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email"]
class Meta:
db_table = "peeringdb_user"
verbose_name = _("user")
verbose_name_plural = _("users")
@property
def pending_affiliation_requests(self):
"""
Returns the currently pending user -> org affiliation
requests for this user.
"""
return self.affiliation_requests.filter(status="pending").order_by("-created")
@property
def affiliation_requests_available(self):
"""
Returns whether the user currently has any affiliation request
slots available by checking that the number of pending affiliation requests
        the user has is lower than MAX_USER_AFFILIATION_REQUESTS.
"""
return (
self.pending_affiliation_requests.count()
< settings.MAX_USER_AFFILIATION_REQUESTS
)
@property
def organizations(self):
"""
Returns all organizations this user is a member of.
"""
ids = []
for group in self.groups.all():
m = re.match(r"^org\.(\d+).*$", group.name)
if m and int(m.group(1)) not in ids:
ids.append(int(m.group(1)))
return [org for org in Organization.objects.filter(id__in=ids, status="ok")]
@property
def networks(self):
"""
Returns all networks this user is a member of.
"""
return list(
chain.from_iterable(org.net_set_active.all() for org in self.organizations)
)
@property
def full_name(self):
return f"{self.first_name} {self.last_name}"
@property
def has_oauth(self):
return SocialAccount.objects.filter(user=self).count() > 0
@property
def email_confirmed(self):
"""
Returns True if the email specified by the user has
been confirmed, False if not.
"""
try:
email = EmailAddress.objects.get(user=self, email=self.email, primary=True)
except EmailAddress.DoesNotExist:
return False
return email.verified
@property
def is_verified_user(self):
"""
Returns whether the user is verified (e.g., has been validated
by PDB staff).
Currently this is accomplished by checking if the user
has been added to the 'user' user group.
"""
group = Group.objects.get(id=settings.USER_GROUP_ID)
return group in self.groups.all()
@staticmethod
def autocomplete_search_fields():
"""
Used by grappelli autocomplete to determine what
fields to search in.
"""
return ("username__icontains", "email__icontains", "last_name__icontains")
def related_label(self):
"""
Used by grappelli autocomplete for representation.
"""
return f"{self.username} <{self.email}> ({self.id})"
def flush_affiliation_requests(self):
"""
Removes all user -> org affiliation requests for this user
that have been denied or canceled.
"""
UserOrgAffiliationRequest.objects.filter(
user=self, status__in=["denied", "canceled"]
).delete()
def recheck_affiliation_requests(self):
"""
Will reevaluate pending affiliation requests to unclaimed
ASN orgs.
This allows a user with such a pending affiliation request to
        change their email and recheck against rdap data for automatic
ownership approval. (#375)
"""
for req in self.pending_affiliation_requests.filter(asn__gt=0):
            # we don't want to re-evaluate for affiliation requests
# with organizations that already have admin users managing them
if req.org_id and req.org.admin_usergroup.user_set.exists():
continue
# cancel current request
req.delete()
# reopen request
UserOrgAffiliationRequest.objects.create(
user=self, org=req.org, asn=req.asn, status="pending"
)
def get_locale(self):
"Returns user preferred language."
return self.locale
def set_locale(self, locale):
"Returns user preferred language."
self.locale = locale
self.save()
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.email)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = f"{self.first_name} {self.last_name}"
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=settings.DEFAULT_FROM_EMAIL):
"""
Sends an email to this User.
"""
if not getattr(settings, "MAIL_DEBUG", False):
mail = EmailMultiAlternatives(
subject,
message,
from_email,
[self.email],
headers={"Auto-Submitted": "auto-generated", "Return-Path": "<>"},
)
mail.send(fail_silently=False)
else:
debug_mail(subject, message, from_email, [self.email])
def set_unverified(self):
"""
Remove user from 'user' group.
Add user to 'guest' group.
"""
guest_group = Group.objects.get(id=settings.GUEST_GROUP_ID)
user_group = Group.objects.get(id=settings.USER_GROUP_ID)
groups = self.groups.all()
if guest_group not in groups:
guest_group.user_set.add(self)
if user_group in groups:
user_group.user_set.remove(self)
self.status = "pending"
self.save()
def set_verified(self):
"""
Add user to 'user' group.
Remove user from 'guest' group.
"""
guest_group = Group.objects.get(id=settings.GUEST_GROUP_ID)
user_group = Group.objects.get(id=settings.USER_GROUP_ID)
groups = self.groups.all()
if guest_group in groups:
guest_group.user_set.remove(self)
if user_group not in groups:
user_group.user_set.add(self)
self.status = "ok"
self.save()
def send_email_confirmation(self, request=None, signup=False):
"""
Use allauth email-confirmation process to make user
confirm that the email they provided is theirs.
"""
# check if there is actually an email set on the user
if not self.email:
return None
# allauth supports multiple email addresses per user, however
# we don't need that, so we check for the primary email address
# and if it already exist we make sure to update it to the
# email address currently specified on the user instance
try:
email = EmailAddress.objects.get(email=self.email)
email.email = self.email
email.user = self
email.verified = False
try:
EmailConfirmation.objects.get(email_address=email).delete()
except EmailConfirmation.DoesNotExist:
pass
except EmailAddress.DoesNotExist:
if EmailAddress.objects.filter(user=self).exists():
EmailAddress.objects.filter(user=self).delete()
email = EmailAddress(user=self, email=self.email, primary=True)
email.save()
email.send_confirmation(request=request, signup=signup)
return email
def password_reset_complete(self, token, password):
if self.password_reset.match(token):
self.set_password(password)
self.save()
self.password_reset.delete()
def password_reset_initiate(self):
"""
Initiate the password reset process for the user.
"""
# pylint: disable=access-member-before-definition
if self.id:
try:
self.password_reset.delete()
except UserPasswordReset.DoesNotExist:
pass
token, hashed = password_reset_token()
self.password_reset = UserPasswordReset.objects.create(
user=self, token=hashed
)
template = loader.get_template("email/password-reset.txt")
with override(self.locale):
self.email_user(
_("Password Reset Initiated"),
template.render(
{
"user": self,
"token": token,
"password_reset_url": settings.PASSWORD_RESET_URL,
}
),
)
return token, hashed
return None, None
def vq_approve(self):
self.set_verified()
def is_org_member(self, org):
return self.groups.filter(id=org.usergroup.id).exists()
def is_org_admin(self, org):
return self.groups.filter(id=org.admin_usergroup.id).exists()
def validate_rdap_relationship(self, rdap):
"""
        # Domain-only matching
email_domain = self.email.split("@")[1]
for email in rdap.emails:
try:
domain = email.split("@")[1]
if email_domain == domain:
return True
            except IndexError:
pass
"""
# Exact email matching
for email in rdap.emails:
if email.lower() == self.email.lower():
return True
return False
class UserAPIKey(AbstractAPIKey):
"""
An API Key managed by a user. Can be readonly or can take on the
permissions of the User.
"""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name="api_keys",
)
readonly = models.BooleanField(
default=False,
help_text=_(
"Determines if API Key inherits the User Permissions or is readonly."
),
)
class Meta(AbstractAPIKey.Meta):
verbose_name = "User API key"
verbose_name_plural = "User API keys"
db_table = "peeringdb_user_api_key"
def password_reset_token():
token = str(uuid.uuid4())
hashed = sha256_crypt.hash(token)
return token, hashed
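# Hedged usage sketch; sha256_crypt is passlib's handler, already used
# elsewhere in this module:
#   token, hashed = password_reset_token()
#   sha256_crypt.verify(token, hashed)  # -> True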
class IXFImportEmail(models.Model):
"""
A copy of all emails sent by the IX-F importer.
"""
subject = models.CharField(max_length=255, blank=False)
message = models.TextField(blank=False)
recipients = models.CharField(max_length=255, blank=False)
created = models.DateTimeField(auto_now_add=True)
sent = models.DateTimeField(blank=True, null=True)
net = models.ForeignKey(
Network,
on_delete=models.CASCADE,
related_name="network_email_set",
blank=True,
null=True,
)
ix = models.ForeignKey(
InternetExchange,
on_delete=models.CASCADE,
related_name="ix_email_set",
blank=True,
null=True,
)
class Meta:
verbose_name = _("IX-F Import Email")
verbose_name_plural = _("IX-F Import Emails")
class UserPasswordReset(models.Model):
class Meta:
db_table = "peeringdb_user_password_reset"
user = models.OneToOneField(
User, on_delete=models.CASCADE, primary_key=True, related_name="password_reset"
)
token = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
def is_valid(self):
valid_until = self.created + datetime.timedelta(hours=2)
if datetime.datetime.now().replace(tzinfo=UTC()) > valid_until:
return False
return True
def match(self, token):
return sha256_crypt.verify(token, self.token)
class CommandLineTool(models.Model):
"""
Describes command line tool execution by a staff user inside the
control panel (admin).
"""
tool = models.CharField(
max_length=255, help_text=_("name of the tool"), choices=COMMANDLINE_TOOLS
)
arguments = models.TextField(
help_text=_("json serialization of arguments and options passed")
)
result = models.TextField(null=True, blank=True, help_text=_("result log"))
description = models.CharField(
max_length=255,
help_text=_("Descriptive text of command that can be searched"),
null=True,
blank=True,
)
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
help_text=_("the user that ran this command"),
related_name="clt_history",
)
created = models.DateTimeField(
auto_now_add=True, help_text=_("command was run at this date and time")
)
status = models.CharField(
max_length=255,
default="done",
choices=[
("done", _("Done")),
("waiting", _("Waiting")),
("running", _("Running")),
],
)
def __str__(self):
return f"{self.tool}: {self.description}"
def set_waiting(self):
self.status = "waiting"
def set_done(self):
self.status = "done"
def set_running(self):
self.status = "running"
class EnvironmentSetting(models.Model):
"""
Environment settings overrides controlled through
django admin (/cp).
"""
class Meta:
db_table = "peeringdb_settings"
verbose_name = _("Environment Setting")
verbose_name_plural = _("Environment Settings")
setting = models.CharField(
max_length=255,
choices=(
(
"IXF_IMPORTER_DAYS_UNTIL_TICKET",
_("IX-F Importer: Days until DeskPRO ticket is created"),
),
),
unique=True,
)
value_str = models.CharField(max_length=255, blank=True, null=True)
value_int = models.IntegerField(blank=True, null=True)
value_bool = models.BooleanField(blank=True, default=False)
value_float = models.FloatField(blank=True, null=True)
updated = models.DateTimeField(
_("Last Updated"),
auto_now=True,
null=True,
blank=True,
)
created = models.DateTimeField(
_("Configured on"),
auto_now_add=True,
blank=True,
null=True,
)
user = models.ForeignKey(
User,
null=True,
on_delete=models.SET_NULL,
related_name="admincom_setting_set",
help_text=_("Last updated by this user"),
)
setting_to_field = {
"IXF_IMPORTER_DAYS_UNTIL_TICKET": "value_int",
}
@classmethod
def get_setting_value(cls, setting):
"""
Get the current value of the setting specified by
its setting name.
        If no instance has been saved for the specified setting,
        the default value will be returned.
"""
try:
instance = cls.objects.get(setting=setting)
return instance.value
except cls.DoesNotExist:
return getattr(settings, setting)
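    # Hedged usage sketch:
    #   EnvironmentSetting.get_setting_value("IXF_IMPORTER_DAYS_UNTIL_TICKET")
    # returns the stored override, falling back to the Django settings default.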
@property
def value(self):
"""
Get the value for this setting.
"""
return getattr(self, self.setting_to_field[self.setting])
def set_value(self, value):
"""
Update the value for this setting.
"""
setattr(self, self.setting_to_field[self.setting], value)
self.full_clean()
self.save()
REFTAG_MAP = {
cls.handleref.tag: cls
for cls in [
Organization,
Network,
Facility,
InternetExchange,
InternetExchangeFacility,
NetworkFacility,
NetworkIXLan,
NetworkContact,
IXLan,
IXLanPrefix,
]
}
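# Hedged lookup sketch; tag values come from each model's HandleRef meta:
#   REFTAG_MAP["net"] is Network  # assuming Network.handleref.tag == "net"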
QUEUE_ENABLED = []
QUEUE_NOTIFY = []
if not getattr(settings, "DISABLE_VERIFICATION_QUEUE", False):
# enable verification queue for these models
QUEUE_ENABLED = (User, InternetExchange, Network, Facility, Organization)
if not getattr(settings, "DISABLE_VERIFICATION_QUEUE_EMAILS", False):
# send admin notification emails for these models
QUEUE_NOTIFY = (InternetExchange, Network, Facility, Organization)
| 30.160792
| 170
| 0.597701
|
88d539daef626bd71f775af32c6056e5c89f330a
| 3,018
|
py
|
Python
|
src/compas/geometry/trimesh/remesh.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
src/compas/geometry/trimesh/remesh.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
src/compas/geometry/trimesh/remesh.py
|
ricardoavelino/compas
|
e3c7f004b8839f96bf01f9f6b21a75786c3f59fa
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.plugins import pluggable
__all__ = [
'trimesh_remesh',
'trimesh_remesh_constrained',
'trimesh_remesh_along_isoline',
]
@pluggable(category='trimesh')
def trimesh_remesh(mesh, target_edge_length, number_of_iterations=10, do_project=True):
"""Remeshing of a triangle mesh.
Parameters
----------
mesh : tuple[sequence[[float, float, float] | :class:`~compas.geometry.Point`], sequence[[int, int, int]]]
A mesh represented by a list of vertices and a list of faces.
target_edge_length : float
The target edge length.
number_of_iterations : int, optional
Number of remeshing iterations.
do_project : bool, optional
Reproject vertices onto the input surface when they are created or displaced.
Returns
-------
list[[float, float, float]]
Vertices of the remeshed mesh.
list[[int, int, int]]
Faces of the remeshed mesh.
Notes
-----
This remeshing function only constrains the edges on the boundary of the mesh.
    To protect specific features or edges, please use :func:`trimesh_remesh_constrained`.
"""
raise NotImplementedError
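# Hedged usage sketch; requires a plugin backend (e.g. compas_cgal) that
# implements this pluggable. `vertices` and `faces` are hypothetical inputs:
#   V, F = trimesh_remesh((vertices, faces), target_edge_length=0.5)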
@pluggable(category='trimesh')
def trimesh_remesh_constrained(mesh, target_edge_length, protected_edges, number_of_iterations=10, do_project=True):
"""Constrained remeshing of a triangle mesh.
Parameters
----------
mesh : tuple[sequence[[float, float, float] | :class:`~compas.geometry.Point`], sequence[[int, int, int]]]
A mesh represented by a list of vertices and a list of faces.
target_edge_length : float
The target edge length.
protected_edges : list[[int, int]]
A list of vertex pairs that identify protected edges of the mesh.
number_of_iterations : int, optional
Number of remeshing iterations.
do_project : bool, optional
Reproject vertices onto the input surface when they are created or displaced.
Returns
-------
list[[float, float, float]]
Vertices of the remeshed mesh.
list[[int, int, int]]
Faces of the remeshed mesh.
"""
raise NotImplementedError
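# Hedged usage sketch; plugin-backed, with `protected` a hypothetical list of
# vertex pairs identifying edges to keep fixed:
#   V, F = trimesh_remesh_constrained((vertices, faces), 0.5, protected)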
@pluggable(category='trimesh')
def trimesh_remesh_along_isoline(mesh, scalarfield, scalar):
"""Remesh a mesh along an isoline of a scalarfield over the vertices.
Parameters
----------
mesh : tuple[sequence[[float, float, float] | :class:`~compas.geometry.Point`], sequence[[int, int, int]]]
A mesh represented by a list of vertices and a list of faces.
scalarfield : sequence[float]
A scalar value per vertex of the mesh.
scalar : float
A value within the range of the scalarfield.
Returns
-------
list[[float, float, float]]
Vertices of the remeshed mesh.
list[[int, int, int]]
Faces of the remeshed mesh.
Examples
--------
>>>
"""
raise NotImplementedError
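# Hedged usage sketch; plugin-backed, remeshing along the scalarfield == 0.0
# isoline (values hypothetical):
#   V, F = trimesh_remesh_along_isoline((vertices, faces), scalarfield, 0.0)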
| 30.18
| 116
| 0.674288
|
6315a92cd94ac708da1f2ba23a28f482f560bc04
| 19,181
|
py
|
Python
|
tests/unit/test_http_session.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_http_session.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_http_session.py
|
vemel/botocore
|
72039648c2880379e512824332c76eb5bf73ed34
|
[
"Apache-2.0"
] | null | null | null |
import socket
import pytest
from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError
from botocore.awsrequest import (
AWSHTTPConnectionPool,
AWSHTTPSConnectionPool,
AWSRequest,
)
from botocore.exceptions import (
ConnectionClosedError,
EndpointConnectionError,
ProxyConnectionError,
)
from botocore.httpsession import (
ProxyConfiguration,
URLLib3Session,
get_cert_path,
mask_proxy_url,
)
from tests import mock, unittest
class TestProxyConfiguration(unittest.TestCase):
def setUp(self):
self.url = 'http://localhost/'
self.auth_url = 'http://user:pass@localhost/'
self.proxy_config = ProxyConfiguration(
proxies={'http': 'http://localhost:8081/'}
)
def update_http_proxy(self, url):
self.proxy_config = ProxyConfiguration(
proxies={'http': url}
)
def test_construct_proxy_headers_with_auth(self):
headers = self.proxy_config.proxy_headers_for(self.auth_url)
proxy_auth = headers.get('Proxy-Authorization')
self.assertEqual('Basic dXNlcjpwYXNz', proxy_auth)
def test_construct_proxy_headers_without_auth(self):
headers = self.proxy_config.proxy_headers_for(self.url)
self.assertEqual({}, headers)
def test_proxy_for_url_no_slashes(self):
self.update_http_proxy('localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_proxy_for_url_no_protocol(self):
self.update_http_proxy('//localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_fix_proxy_url_has_protocol_http(self):
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
class TestHttpSessionUtils(unittest.TestCase):
def test_get_cert_path_path(self):
path = '/some/path'
cert_path = get_cert_path(path)
self.assertEqual(path, cert_path)
def test_get_cert_path_certifi_or_default(self):
with mock.patch('botocore.httpsession.where') as where:
path = '/bundle/path'
where.return_value = path
cert_path = get_cert_path(True)
self.assertEqual(path, cert_path)
@pytest.mark.parametrize(
'proxy_url, expected_mask_url',
(
(
'http://myproxy.amazonaws.com',
'http://myproxy.amazonaws.com'
),
(
'http://user@myproxy.amazonaws.com',
'http://***@myproxy.amazonaws.com'
),
(
'http://user:pass@myproxy.amazonaws.com',
'http://***:***@myproxy.amazonaws.com'
),
(
'https://user:pass@myproxy.amazonaws.com',
'https://***:***@myproxy.amazonaws.com'
),
(
'http://user:pass@localhost',
'http://***:***@localhost'
),
(
'http://user:pass@localhost:80',
'http://***:***@localhost:80'
),
(
'http://user:pass@userpass.com',
'http://***:***@userpass.com'
),
(
'http://user:pass@192.168.1.1',
'http://***:***@192.168.1.1'
),
(
'http://user:pass@[::1]',
'http://***:***@[::1]'
),
(
'http://user:pass@[::1]:80',
'http://***:***@[::1]:80'
),
)
)
def test_mask_proxy_url(proxy_url, expected_mask_url):
assert mask_proxy_url(proxy_url) == expected_mask_url
class TestURLLib3Session(unittest.TestCase):
def setUp(self):
self.request = AWSRequest(
method='GET',
url='http://example.com/',
headers={},
data=b'',
)
self.response = mock.Mock()
self.response.headers = {}
self.response.stream.return_value = b''
self.pool_manager = mock.Mock()
self.connection = mock.Mock()
self.connection.urlopen.return_value = self.response
self.pool_manager.connection_from_url.return_value = self.connection
self.pool_patch = mock.patch('botocore.httpsession.PoolManager')
self.proxy_patch = mock.patch('botocore.httpsession.proxy_from_url')
self.pool_manager_cls = self.pool_patch.start()
self.proxy_manager_fun = self.proxy_patch.start()
self.pool_manager_cls.return_value = self.pool_manager
self.proxy_manager_fun.return_value = self.pool_manager
def tearDown(self):
self.pool_patch.stop()
self.proxy_patch.stop()
def assert_request_sent(self, headers=None, body=None, url='/', chunked=False):
if headers is None:
headers = {}
self.connection.urlopen.assert_called_once_with(
method=self.request.method,
url=url,
body=body,
headers=headers,
retries=mock.ANY,
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=chunked,
)
def _assert_manager_call(self, manager, *assert_args, **assert_kwargs):
call_kwargs = {
'strict': True,
'maxsize': mock.ANY,
'timeout': mock.ANY,
'ssl_context': mock.ANY,
'socket_options': [],
'cert_file': None,
'key_file': None,
}
call_kwargs.update(assert_kwargs)
manager.assert_called_with(*assert_args, **call_kwargs)
def assert_pool_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.pool_manager_cls, *args, **kwargs)
def assert_proxy_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.proxy_manager_fun, *args, **kwargs)
def test_forwards_max_pool_size(self):
URLLib3Session(max_pool_connections=22)
self.assert_pool_manager_call(maxsize=22)
def test_forwards_client_cert(self):
URLLib3Session(client_cert='/some/cert')
self.assert_pool_manager_call(cert_file='/some/cert', key_file=None)
def test_forwards_client_cert_and_key_tuple(self):
cert = ('/some/cert', '/some/key')
URLLib3Session(client_cert=cert)
self.assert_pool_manager_call(cert_file=cert[0], key_file=cert[1])
def test_proxies_config_settings(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': 'path/to/bundle',
'proxy_client_cert': ('path/to/cert', 'path/to/key'),
'proxy_use_forwarding_for_https': False,
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
proxy_ssl_context=mock.ANY,
use_forwarding_for_https=use_forwarding
)
self.assert_request_sent(url=self.request.url)
def test_proxies_config_settings_unknown_config(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': None,
'proxy_client_cert': None,
'proxy_use_forwarding_for_https': True,
'proxy_not_a_real_arg': 'do not pass'
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
use_forwarding_for_https=use_forwarding
)
self.assertNotIn(
'proxy_not_a_real_arg',
self.proxy_manager_fun.call_args
)
self.assert_request_sent(url=self.request.url)
def test_http_proxy_scheme_with_http_url(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
)
self.assert_request_sent(url=self.request.url)
def test_http_proxy_scheme_with_https_url(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
)
self.assert_request_sent()
def test_https_proxy_scheme_with_http_url(self):
proxies = {'http': 'https://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
)
self.assert_request_sent(url=self.request.url)
def test_https_proxy_scheme_tls_in_tls(self):
proxies = {'https': 'https://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
)
self.assert_request_sent()
def test_https_proxy_scheme_forwarding_https_url(self):
proxies = {'https': 'https://proxy.com'}
proxies_config = {"proxy_use_forwarding_for_https": True}
session = URLLib3Session(proxies=proxies, proxies_config=proxies_config)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
use_forwarding_for_https=True,
)
self.assert_request_sent(url=self.request.url)
def test_basic_https_proxy_with_client_cert(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies, client_cert='/some/cert')
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file='/some/cert',
key_file=None,
)
self.assert_request_sent()
def test_basic_https_proxy_with_client_cert_and_key(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies, client_cert=cert)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file=cert[0],
key_file=cert[1],
)
self.assert_request_sent()
def test_urllib3_proxies_kwargs_included(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'https://proxy.com'}
proxies_config = {'proxy_client_cert': "path/to/cert"}
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies, client_cert=cert,
proxies_config=proxies_config
)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file=cert[0],
key_file=cert[1],
proxy_ssl_context=mock.ANY
)
self.assert_request_sent()
def test_proxy_ssl_context_uses_check_hostname(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'https://proxy.com'}
proxies_config = {'proxy_client_cert': "path/to/cert"}
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies, client_cert=cert,
proxies_config=proxies_config
)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
last_call = self.proxy_manager_fun.call_args[-1]
self.assertIs(last_call['ssl_context'].check_hostname, True)
def test_proxy_ssl_context_does_not_use_check_hostname_if_ip_address(self):
cert = ('/some/cert', '/some/key')
proxies_config = {'proxy_client_cert': "path/to/cert"}
urls = ['https://1.2.3.4:5678',
'https://4.6.0.0',
'https://[FE80::8939:7684:D84b:a5A4%251]:1234',
'https://[FE80::8939:7684:D84b:a5A4%251]',
'https://[FE80::8939:7684:D84b:a5A4]:999',
'https://[FE80::8939:7684:D84b:a5A4]',
'https://[::1]:789']
for proxy_url in urls:
with mock.patch('botocore.httpsession.SSLContext'):
proxies = {'https': proxy_url}
session = URLLib3Session(
proxies=proxies, client_cert=cert,
proxies_config=proxies_config
)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
last_call = self.proxy_manager_fun.call_args[-1]
self.assertIs(last_call['ssl_context'].check_hostname, False)
def test_basic_request(self):
session = URLLib3Session()
session.send(self.request.prepare())
self.assert_request_sent()
self.response.stream.assert_called_once_with()
def test_basic_streaming_request(self):
session = URLLib3Session()
self.request.stream_output = True
session.send(self.request.prepare())
self.assert_request_sent()
self.response.stream.assert_not_called()
def test_basic_https_request(self):
session = URLLib3Session()
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_request_sent()
def test_basic_https_proxy_request(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(proxies['https'], proxy_headers={})
self.assert_request_sent()
def test_basic_proxy_request_caches_manager(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
# assert we created the proxy manager
self.assert_proxy_manager_call(proxies['https'], proxy_headers={})
session.send(self.request.prepare())
# assert that we did not create another proxy manager
self.assertEqual(self.proxy_manager_fun.call_count, 1)
def test_basic_http_proxy_request(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
session.send(self.request.prepare())
self.assert_proxy_manager_call(proxies['http'], proxy_headers={})
self.assert_request_sent(url=self.request.url)
def test_ssl_context_is_explicit(self):
session = URLLib3Session()
session.send(self.request.prepare())
_, manager_kwargs = self.pool_manager_cls.call_args
self.assertIsNotNone(manager_kwargs.get('ssl_context'))
def test_proxy_request_ssl_context_is_explicit(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
session.send(self.request.prepare())
_, proxy_kwargs = self.proxy_manager_fun.call_args
self.assertIsNotNone(proxy_kwargs.get('ssl_context'))
def test_session_forwards_socket_options_to_pool_manager(self):
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
URLLib3Session(socket_options=socket_options)
self.assert_pool_manager_call(socket_options=socket_options)
def test_session_forwards_socket_options_to_proxy_manager(self):
proxies = {'http': 'http://proxy.com'}
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
session = URLLib3Session(
proxies=proxies,
socket_options=socket_options,
)
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
socket_options=socket_options,
)
def make_request_with_error(self, error):
self.connection.urlopen.side_effect = error
session = URLLib3Session()
session.send(self.request.prepare())
def test_catches_new_connection_error(self):
error = NewConnectionError(None, None)
with pytest.raises(EndpointConnectionError):
self.make_request_with_error(error)
def test_catches_bad_status_line(self):
error = ProtocolError(None)
with pytest.raises(ConnectionClosedError):
self.make_request_with_error(error)
def test_catches_proxy_error(self):
self.connection.urlopen.side_effect = ProxyError('test', None)
session = URLLib3Session(proxies={'http': 'http://user:pass@proxy.com'})
with pytest.raises(ProxyConnectionError) as e:
session.send(self.request.prepare())
assert 'user:pass' not in str(e.value)
assert 'http://***:***@proxy.com' in str(e.value)
def test_aws_connection_classes_are_used(self):
session = URLLib3Session() # noqa
# ensure the pool manager is using the correct classes
http_class = self.pool_manager.pool_classes_by_scheme.get('http')
self.assertIs(http_class, AWSHTTPConnectionPool)
https_class = self.pool_manager.pool_classes_by_scheme.get('https')
self.assertIs(https_class, AWSHTTPSConnectionPool)
def test_chunked_encoding_is_set_with_header(self):
session = URLLib3Session()
self.request.headers['Transfer-Encoding'] = 'chunked'
session.send(self.request.prepare())
self.assert_request_sent(
chunked=True,
headers={'Transfer-Encoding': 'chunked'},
)
def test_chunked_encoding_is_not_set_without_header(self):
session = URLLib3Session()
session.send(self.request.prepare())
self.assert_request_sent(chunked=False)
| 37.24466
| 83
| 0.621918
|
cc64b4e3383c365e49e6cbc66ea7a56cd15880eb
| 5,658
|
py
|
Python
|
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/amazon/ec2_win_password.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/amazon/ec2_win_password.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
myprojectenv/lib/python3.5/site-packages/ansible/modules/cloud/amazon/ec2_win_password.py
|
lancerenteria/doFlask
|
2d4e242469b108c6c8316ee18a540307497bfb53
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances
description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module
has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
key_file:
description:
- Path to the file containing the key pair used on the instance.
required: true
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
wait:
version_added: "2.0"
description:
- Whether or not to wait for the password to be available before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
version_added: "2.0"
description:
- Number of seconds to wait before giving up.
required: false
default: 120
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Example of getting a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
# Example of getting a password with a password protected key
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_protected_test_key.pem"
key_passphrase: "secret"
# Example of waiting for a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
wait: yes
wait_timeout: 45
'''
from base64 import b64decode
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import datetime
import time  # used by the wait loop in main()
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
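# Core decryption flow used in main() below (hedged sketch, PyCrypto API;
# file name and passphrase are hypothetical):
#   key = RSA.importKey(open("key.pem").read(), "passphrase")
#   cipher = PKCS1_v1_5.new(key)
#   password = cipher.decrypt(b64decode(password_data), sentinel)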
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True, type='path'),
key_passphrase = dict(no_log=True, default=None, required=False),
wait = dict(type='bool', default=False, required=False),
wait_timeout = dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='Boto required for this module.')
instance_id = module.params.get('instance_id')
key_file = module.params.get('key_file')
key_passphrase = module.params.get('key_passphrase')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
ec2 = ec2_connect(module)
if wait:
start = datetime.datetime.now()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.now() < end:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and not decoded:
time.sleep(5)
else:
break
else:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
try:
f = open(key_file, 'r')
except IOError as e:
module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
else:
try:
with f:
key = RSA.importKey(f.read(), key_passphrase)
except (ValueError, IndexError, TypeError) as e:
module.fail_json(msg = "unable to parse key file")
cipher = PKCS1_v1_5.new(key)
sentinel = 'password decryption failed!!!'
try:
decrypted = cipher.decrypt(decoded, sentinel)
except ValueError as e:
decrypted = None
if decrypted is None:
module.exit_json(win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
else:
module.exit_json(win_password=decrypted, changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| 30.918033
| 144
| 0.675327
|
97fc5d8ad783829f502455952a3941648669ed71
| 98,061
|
py
|
Python
|
Lib/test/test_pathlib.py
|
xuchanglong/Python-3.8.2-comments
|
f873f0b45b086c40a704c802e93e9a9c571aba83
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/test/test_pathlib.py
|
xuchanglong/Python-3.8.2-comments
|
f873f0b45b086c40a704c802e93e9a9c571aba83
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Lib/test/test_pathlib.py
|
xuchanglong/Python-3.8.2-comments
|
f873f0b45b086c40a704c802e93e9a9c571aba83
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
import collections.abc
import io
import os
import sys
import errno
import pathlib
import pickle
import socket
import stat
import tempfile
import unittest
from unittest import mock
from test import support
from test.support import TESTFN, FakePath
try:
import grp, pwd
except ImportError:
grp = pwd = None
class _BaseFlavourTest(object):
def _check_parse_parts(self, arg, expected):
f = self.flavour.parse_parts
sep = self.flavour.sep
altsep = self.flavour.altsep
actual = f([x.replace('/', sep) for x in arg])
self.assertEqual(actual, expected)
if altsep:
actual = f([x.replace('/', altsep) for x in arg])
self.assertEqual(actual, expected)
def test_parse_parts_common(self):
check = self._check_parse_parts
sep = self.flavour.sep
# Unanchored parts.
check([], ('', '', []))
check(['a'], ('', '', ['a']))
check(['a/'], ('', '', ['a']))
check(['a', 'b'], ('', '', ['a', 'b']))
# Expansion.
check(['a/b'], ('', '', ['a', 'b']))
check(['a/b/'], ('', '', ['a', 'b']))
check(['a', 'b/c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Collapsing and stripping excess slashes.
check(['a', 'b//c', 'd'], ('', '', ['a', 'b', 'c', 'd']))
check(['a', 'b/c/', 'd'], ('', '', ['a', 'b', 'c', 'd']))
# Eliminating standalone dots.
check(['.'], ('', '', []))
check(['.', '.', 'b'], ('', '', ['b']))
check(['a', '.', 'b'], ('', '', ['a', 'b']))
check(['a', '.', '.'], ('', '', ['a']))
# The first part is anchored.
check(['/a/b'], ('', sep, [sep, 'a', 'b']))
check(['/a', 'b'], ('', sep, [sep, 'a', 'b']))
check(['/a/', 'b'], ('', sep, [sep, 'a', 'b']))
# Ignoring parts before an anchored part.
check(['a', '/b', 'c'], ('', sep, [sep, 'b', 'c']))
check(['a', '/b', '/c'], ('', sep, [sep, 'c']))
class PosixFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._posix_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# Collapsing of excess leading slashes, except for the double-slash
# special case.
check(['//a', 'b'], ('', '//', ['//', 'a', 'b']))
check(['///a', 'b'], ('', '/', ['/', 'a', 'b']))
check(['////a', 'b'], ('', '/', ['/', 'a', 'b']))
# Paths which look like NT paths aren't treated specially.
check(['c:a'], ('', '', ['c:a']))
check(['c:\\a'], ('', '', ['c:\\a']))
check(['\\a'], ('', '', ['\\a']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a/b'), ('', '', 'a/b'))
self.assertEqual(f('a/b/'), ('', '', 'a/b/'))
self.assertEqual(f('/a'), ('', '/', 'a'))
self.assertEqual(f('/a/b'), ('', '/', 'a/b'))
self.assertEqual(f('/a/b/'), ('', '/', 'a/b/'))
# The root is collapsed when there are redundant slashes
# except when there are exactly two leading slashes, which
# is a special case in POSIX.
self.assertEqual(f('//a'), ('', '//', 'a'))
self.assertEqual(f('///a'), ('', '/', 'a'))
self.assertEqual(f('///a/b'), ('', '/', 'a/b'))
# Paths which look like NT paths aren't treated specially.
self.assertEqual(f('c:/a/b'), ('', '', 'c:/a/b'))
self.assertEqual(f('\\/a/b'), ('', '', '\\/a/b'))
self.assertEqual(f('\\a\\b'), ('', '', '\\a\\b'))
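# Illustrative examples of the POSIX leading-slash rules exercised above
# (pure-path operations only, shown as a doctest-style note):
#
#     >>> str(pathlib.PurePosixPath('//a/b'))    # exactly two: preserved
#     '//a/b'
#     >>> str(pathlib.PurePosixPath('///a/b'))   # three or more: collapsed
#     '/a/b'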
class NTFlavourTest(_BaseFlavourTest, unittest.TestCase):
flavour = pathlib._windows_flavour
def test_parse_parts(self):
check = self._check_parse_parts
# First part is anchored.
check(['c:'], ('c:', '', ['c:']))
check(['c:/'], ('c:', '\\', ['c:\\']))
check(['/'], ('', '\\', ['\\']))
check(['c:a'], ('c:', '', ['c:', 'a']))
check(['c:/a'], ('c:', '\\', ['c:\\', 'a']))
check(['/a'], ('', '\\', ['\\', 'a']))
# UNC paths.
check(['//a/b'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['//a/b/'], ('\\\\a\\b', '\\', ['\\\\a\\b\\']))
check(['//a/b/c'], ('\\\\a\\b', '\\', ['\\\\a\\b\\', 'c']))
# Second part is anchored, so that the first part is ignored.
check(['a', 'Z:b', 'c'], ('Z:', '', ['Z:', 'b', 'c']))
check(['a', 'Z:/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
# UNC paths.
check(['a', '//b/c', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Collapsing and stripping excess slashes.
check(['a', 'Z://b//c/', 'd/'], ('Z:', '\\', ['Z:\\', 'b', 'c', 'd']))
# UNC paths.
check(['a', '//b/c//', 'd'], ('\\\\b\\c', '\\', ['\\\\b\\c\\', 'd']))
# Extended paths.
check(['//?/c:/'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\']))
check(['//?/c:/a'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'a']))
check(['//?/c:/a', '/b'], ('\\\\?\\c:', '\\', ['\\\\?\\c:\\', 'b']))
# Extended UNC paths (format is "\\?\UNC\server\share").
check(['//?/UNC/b/c'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\']))
check(['//?/UNC/b/c/d'], ('\\\\?\\UNC\\b\\c', '\\', ['\\\\?\\UNC\\b\\c\\', 'd']))
# Second part has a root but not drive.
check(['a', '/b', 'c'], ('', '\\', ['\\', 'b', 'c']))
check(['Z:/a', '/b', 'c'], ('Z:', '\\', ['Z:\\', 'b', 'c']))
check(['//?/Z:/a', '/b', 'c'], ('\\\\?\\Z:', '\\', ['\\\\?\\Z:\\', 'b', 'c']))
def test_splitroot(self):
f = self.flavour.splitroot
self.assertEqual(f(''), ('', '', ''))
self.assertEqual(f('a'), ('', '', 'a'))
self.assertEqual(f('a\\b'), ('', '', 'a\\b'))
self.assertEqual(f('\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('c:a\\b'), ('c:', '', 'a\\b'))
self.assertEqual(f('c:\\a\\b'), ('c:', '\\', 'a\\b'))
# Redundant slashes in the root are collapsed.
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
self.assertEqual(f('\\\\\\a/b'), ('', '\\', 'a/b'))
self.assertEqual(f('c:\\\\a'), ('c:', '\\', 'a'))
self.assertEqual(f('c:\\\\\\a/b'), ('c:', '\\', 'a/b'))
# Valid UNC paths.
self.assertEqual(f('\\\\a\\b'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\'), ('\\\\a\\b', '\\', ''))
self.assertEqual(f('\\\\a\\b\\c\\d'), ('\\\\a\\b', '\\', 'c\\d'))
# These are non-UNC paths (according to ntpath.py and test_ntpath).
# However, command.com says such paths are invalid, so it's
# difficult to know what the right semantics are.
self.assertEqual(f('\\\\\\a\\b'), ('', '\\', 'a\\b'))
self.assertEqual(f('\\\\a'), ('', '\\', 'a'))
#
# Tests for the pure classes.
#
class _BasePurePathTest(object):
# Keys are canonical paths, values are list of tuples of arguments
# supposed to produce equal paths.
equivalences = {
'a/b': [
('a', 'b'), ('a/', 'b'), ('a', 'b/'), ('a/', 'b/'),
('a/b/',), ('a//b',), ('a//b//',),
# Empty components get removed.
('', 'a', 'b'), ('a', '', 'b'), ('a', 'b', ''),
],
'/b/c/d': [
('a', '/b/c', 'd'), ('a', '///b//c', 'd/'),
('/a', '/b/c', 'd'),
# Empty components get removed.
('/', 'b', '', 'c/d'), ('/', '', 'b/c/d'), ('', '/b/c/d'),
],
}
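    # An illustrative reading of the table above: every argument tuple in a
    # value list constructs the same path as its key, e.g. (assuming the
    # POSIX flavour):
    #
    #     >>> pathlib.PurePosixPath('a//b//') == pathlib.PurePosixPath('a/b')
    #     True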
def setUp(self):
p = self.cls('a')
self.flavour = p._flavour
self.sep = self.flavour.sep
self.altsep = self.flavour.altsep
def test_constructor_common(self):
P = self.cls
p = P('a')
self.assertIsInstance(p, P)
P('a', 'b', 'c')
P('/a', 'b', 'c')
P('a/b/c')
P('/a/b/c')
P(FakePath("a/b/c"))
self.assertEqual(P(P('a')), P('a'))
self.assertEqual(P(P('a'), 'b'), P('a/b'))
self.assertEqual(P(P('a'), P('b')), P('a/b'))
self.assertEqual(P(P('a'), P('b'), P('c')), P(FakePath("a/b/c")))
def _check_str_subclass(self, *args):
# Issue #21127: it should be possible to construct a PurePath object
# from a str subclass instance, and it then gets converted to
# a pure str object.
class StrSubclass(str):
pass
P = self.cls
p = P(*(StrSubclass(x) for x in args))
self.assertEqual(p, P(*args))
for part in p.parts:
self.assertIs(type(part), str)
def test_str_subclass_common(self):
self._check_str_subclass('')
self._check_str_subclass('.')
self._check_str_subclass('a')
self._check_str_subclass('a/b.txt')
self._check_str_subclass('/a/b.txt')
def test_join_common(self):
P = self.cls
p = P('a/b')
pp = p.joinpath('c')
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p.joinpath('c', 'd')
self.assertEqual(pp, P('a/b/c/d'))
pp = p.joinpath(P('c'))
self.assertEqual(pp, P('a/b/c'))
pp = p.joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div_common(self):
# Basically the same as joinpath().
P = self.cls
p = P('a/b')
pp = p / 'c'
self.assertEqual(pp, P('a/b/c'))
self.assertIs(type(pp), type(p))
pp = p / 'c/d'
self.assertEqual(pp, P('a/b/c/d'))
pp = p / 'c' / 'd'
self.assertEqual(pp, P('a/b/c/d'))
pp = 'c' / p / 'd'
self.assertEqual(pp, P('c/a/b/d'))
pp = p / P('c')
self.assertEqual(pp, P('a/b/c'))
        pp = p / '/c'
self.assertEqual(pp, P('/c'))
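    # Illustrative note: '/' delegates to joinpath(), and an absolute
    # right-hand operand resets the result, e.g.:
    #
    #     >>> pathlib.PurePosixPath('a/b') / '/c'
    #     PurePosixPath('/c')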
def _check_str(self, expected, args):
p = self.cls(*args)
self.assertEqual(str(p), expected.replace('/', self.sep))
def test_str_common(self):
# Canonicalized paths roundtrip.
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self._check_str(pathstr, (pathstr,))
# Special case for the empty path.
self._check_str('.', ('',))
# Other tests for str() are in test_equivalences().
def test_as_posix_common(self):
P = self.cls
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
self.assertEqual(P(pathstr).as_posix(), pathstr)
# Other tests for as_posix() are in test_equivalences().
def test_as_bytes_common(self):
sep = os.fsencode(self.sep)
P = self.cls
self.assertEqual(bytes(P('a/b')), b'a' + sep + b'b')
def test_as_uri_common(self):
P = self.cls
with self.assertRaises(ValueError):
P('a').as_uri()
with self.assertRaises(ValueError):
P().as_uri()
def test_repr_common(self):
for pathstr in ('a', 'a/b', 'a/b/c', '/', '/a/b', '/a/b/c'):
p = self.cls(pathstr)
clsname = p.__class__.__name__
r = repr(p)
# The repr() is in the form ClassName("forward-slashes path").
self.assertTrue(r.startswith(clsname + '('), r)
self.assertTrue(r.endswith(')'), r)
inner = r[len(clsname) + 1 : -1]
self.assertEqual(eval(inner), p.as_posix())
# The repr() roundtrips.
q = eval(r, pathlib.__dict__)
self.assertIs(q.__class__, p.__class__)
self.assertEqual(q, p)
self.assertEqual(repr(q), r)
def test_eq_common(self):
P = self.cls
self.assertEqual(P('a/b'), P('a/b'))
self.assertEqual(P('a/b'), P('a', 'b'))
self.assertNotEqual(P('a/b'), P('a'))
self.assertNotEqual(P('a/b'), P('/a/b'))
self.assertNotEqual(P('a/b'), P())
self.assertNotEqual(P('/a/b'), P('/'))
self.assertNotEqual(P(), P('/'))
self.assertNotEqual(P(), "")
self.assertNotEqual(P(), {})
self.assertNotEqual(P(), int)
def test_match_common(self):
P = self.cls
self.assertRaises(ValueError, P('a').match, '')
self.assertRaises(ValueError, P('a').match, '.')
# Simple relative pattern.
self.assertTrue(P('b.py').match('b.py'))
self.assertTrue(P('a/b.py').match('b.py'))
self.assertTrue(P('/a/b.py').match('b.py'))
self.assertFalse(P('a.py').match('b.py'))
self.assertFalse(P('b/py').match('b.py'))
self.assertFalse(P('/a.py').match('b.py'))
self.assertFalse(P('b.py/c').match('b.py'))
        # Wildcard relative pattern.
self.assertTrue(P('b.py').match('*.py'))
self.assertTrue(P('a/b.py').match('*.py'))
self.assertTrue(P('/a/b.py').match('*.py'))
self.assertFalse(P('b.pyc').match('*.py'))
self.assertFalse(P('b./py').match('*.py'))
self.assertFalse(P('b.py/c').match('*.py'))
# Multi-part relative pattern.
self.assertTrue(P('ab/c.py').match('a*/*.py'))
self.assertTrue(P('/d/ab/c.py').match('a*/*.py'))
self.assertFalse(P('a.py').match('a*/*.py'))
self.assertFalse(P('/dab/c.py').match('a*/*.py'))
self.assertFalse(P('ab/c.py/d').match('a*/*.py'))
# Absolute pattern.
self.assertTrue(P('/b.py').match('/*.py'))
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('a/b.py').match('/*.py'))
self.assertFalse(P('/a/b.py').match('/*.py'))
# Multi-part absolute pattern.
self.assertTrue(P('/a/b.py').match('/a/*.py'))
self.assertFalse(P('/ab.py').match('/a/*.py'))
self.assertFalse(P('/a/b/c.py').match('/a/*.py'))
# Multi-part glob-style pattern.
self.assertFalse(P('/a/b/c.py').match('/**/*.py'))
self.assertTrue(P('/a/b/c.py').match('/a/**/*.py'))
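    # Illustrative note: a relative pattern is matched from the right, so a
    # single-component pattern only constrains the final component, while an
    # absolute pattern must match the whole path:
    #
    #     >>> pathlib.PurePath('a/b.py').match('*.py')
    #     True
    #     >>> pathlib.PurePath('/a/b.py').match('/*.py')
    #     False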
def test_ordering_common(self):
# Ordering is tuple-alike.
def assertLess(a, b):
self.assertLess(a, b)
self.assertGreater(b, a)
P = self.cls
a = P('a')
b = P('a/b')
c = P('abc')
d = P('b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
P = self.cls
a = P('/a')
b = P('/a/b')
c = P('/abc')
d = P('/b')
assertLess(a, b)
assertLess(a, c)
assertLess(a, d)
assertLess(b, c)
assertLess(c, d)
with self.assertRaises(TypeError):
P() < {}
def test_parts_common(self):
# `parts` returns a tuple.
sep = self.sep
P = self.cls
p = P('a/b')
parts = p.parts
self.assertEqual(parts, ('a', 'b'))
# The object gets reused.
self.assertIs(parts, p.parts)
# When the path is absolute, the anchor is a separate part.
p = P('/a/b')
parts = p.parts
self.assertEqual(parts, (sep, 'a', 'b'))
def test_fspath_common(self):
P = self.cls
p = P('a/b')
self._check_str(p.__fspath__(), ('a/b',))
self._check_str(os.fspath(p), ('a/b',))
def test_equivalences(self):
for k, tuples in self.equivalences.items():
canon = k.replace('/', self.sep)
posix = k.replace(self.sep, '/')
if canon != posix:
tuples = tuples + [
tuple(part.replace('/', self.sep) for part in t)
for t in tuples
]
tuples.append((posix, ))
pcanon = self.cls(canon)
for t in tuples:
p = self.cls(*t)
self.assertEqual(p, pcanon, "failed with args {}".format(t))
self.assertEqual(hash(p), hash(pcanon))
self.assertEqual(str(p), canon)
self.assertEqual(p.as_posix(), posix)
def test_parent_common(self):
# Relative
P = self.cls
p = P('a/b/c')
self.assertEqual(p.parent, P('a/b'))
self.assertEqual(p.parent.parent, P('a'))
self.assertEqual(p.parent.parent.parent, P())
self.assertEqual(p.parent.parent.parent.parent, P())
# Anchored
p = P('/a/b/c')
self.assertEqual(p.parent, P('/a/b'))
self.assertEqual(p.parent.parent, P('/a'))
self.assertEqual(p.parent.parent.parent, P('/'))
self.assertEqual(p.parent.parent.parent.parent, P('/'))
def test_parents_common(self):
# Relative
P = self.cls
p = P('a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('a/b'))
self.assertEqual(par[1], P('a'))
self.assertEqual(par[2], P('.'))
self.assertEqual(list(par), [P('a/b'), P('a'), P('.')])
with self.assertRaises(IndexError):
par[-1]
with self.assertRaises(IndexError):
par[3]
with self.assertRaises(TypeError):
par[0] = p
# Anchored
p = P('/a/b/c')
par = p.parents
self.assertEqual(len(par), 3)
self.assertEqual(par[0], P('/a/b'))
self.assertEqual(par[1], P('/a'))
self.assertEqual(par[2], P('/'))
self.assertEqual(list(par), [P('/a/b'), P('/a'), P('/')])
with self.assertRaises(IndexError):
par[3]
def test_drive_common(self):
P = self.cls
self.assertEqual(P('a/b').drive, '')
self.assertEqual(P('/a/b').drive, '')
self.assertEqual(P('').drive, '')
def test_root_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').root, '')
self.assertEqual(P('a/b').root, '')
self.assertEqual(P('/').root, sep)
self.assertEqual(P('/a/b').root, sep)
def test_anchor_common(self):
P = self.cls
sep = self.sep
self.assertEqual(P('').anchor, '')
self.assertEqual(P('a/b').anchor, '')
self.assertEqual(P('/').anchor, sep)
self.assertEqual(P('/a/b').anchor, sep)
def test_name_common(self):
P = self.cls
self.assertEqual(P('').name, '')
self.assertEqual(P('.').name, '')
self.assertEqual(P('/').name, '')
self.assertEqual(P('a/b').name, 'b')
self.assertEqual(P('/a/b').name, 'b')
self.assertEqual(P('/a/b/.').name, 'b')
self.assertEqual(P('a/b.py').name, 'b.py')
self.assertEqual(P('/a/b.py').name, 'b.py')
def test_suffix_common(self):
P = self.cls
self.assertEqual(P('').suffix, '')
self.assertEqual(P('.').suffix, '')
self.assertEqual(P('..').suffix, '')
self.assertEqual(P('/').suffix, '')
self.assertEqual(P('a/b').suffix, '')
self.assertEqual(P('/a/b').suffix, '')
self.assertEqual(P('/a/b/.').suffix, '')
self.assertEqual(P('a/b.py').suffix, '.py')
self.assertEqual(P('/a/b.py').suffix, '.py')
self.assertEqual(P('a/.hgrc').suffix, '')
self.assertEqual(P('/a/.hgrc').suffix, '')
self.assertEqual(P('a/.hg.rc').suffix, '.rc')
self.assertEqual(P('/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('/a/Some name. Ending with a dot.').suffix, '')
def test_suffixes_common(self):
P = self.cls
self.assertEqual(P('').suffixes, [])
self.assertEqual(P('.').suffixes, [])
self.assertEqual(P('/').suffixes, [])
self.assertEqual(P('a/b').suffixes, [])
self.assertEqual(P('/a/b').suffixes, [])
self.assertEqual(P('/a/b/.').suffixes, [])
self.assertEqual(P('a/b.py').suffixes, ['.py'])
self.assertEqual(P('/a/b.py').suffixes, ['.py'])
self.assertEqual(P('a/.hgrc').suffixes, [])
self.assertEqual(P('/a/.hgrc').suffixes, [])
self.assertEqual(P('a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('/a/Some name. Ending with a dot.').suffixes, [])
def test_stem_common(self):
P = self.cls
self.assertEqual(P('').stem, '')
self.assertEqual(P('.').stem, '')
self.assertEqual(P('..').stem, '..')
self.assertEqual(P('/').stem, '')
self.assertEqual(P('a/b').stem, 'b')
self.assertEqual(P('a/b.py').stem, 'b')
self.assertEqual(P('a/.hgrc').stem, '.hgrc')
self.assertEqual(P('a/.hg.rc').stem, '.hg')
self.assertEqual(P('a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
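    # Illustrative summary of the relationship tested above: name is
    # stem + suffix, and suffix is the last entry of suffixes:
    #
    #     >>> p = pathlib.PurePosixPath('a/b.tar.gz')
    #     >>> p.stem, p.suffix, p.suffixes
    #     ('b.tar', '.gz', ['.tar', '.gz'])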
def test_with_name_common(self):
P = self.cls
self.assertEqual(P('a/b').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/b.py').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/b.py').with_name('d.xml'), P('/a/d.xml'))
self.assertEqual(P('a/Dot ending.').with_name('d.xml'), P('a/d.xml'))
self.assertEqual(P('/a/Dot ending.').with_name('d.xml'), P('/a/d.xml'))
self.assertRaises(ValueError, P('').with_name, 'd.xml')
self.assertRaises(ValueError, P('.').with_name, 'd.xml')
self.assertRaises(ValueError, P('/').with_name, 'd.xml')
self.assertRaises(ValueError, P('a/b').with_name, '')
self.assertRaises(ValueError, P('a/b').with_name, '/c')
self.assertRaises(ValueError, P('a/b').with_name, 'c/')
self.assertRaises(ValueError, P('a/b').with_name, 'c/d')
def test_with_suffix_common(self):
P = self.cls
self.assertEqual(P('a/b').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b').with_suffix('.gz'), P('/a/b.gz'))
self.assertEqual(P('a/b.py').with_suffix('.gz'), P('a/b.gz'))
self.assertEqual(P('/a/b.py').with_suffix('.gz'), P('/a/b.gz'))
# Stripping suffix.
self.assertEqual(P('a/b.py').with_suffix(''), P('a/b'))
self.assertEqual(P('/a/b').with_suffix(''), P('/a/b'))
# Path doesn't have a "filename" component.
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
# Invalid suffix.
self.assertRaises(ValueError, P('a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('a/b').with_suffix, '/')
self.assertRaises(ValueError, P('a/b').with_suffix, '.')
self.assertRaises(ValueError, P('a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.c/.d')
self.assertRaises(ValueError, P('a/b').with_suffix, './.d')
self.assertRaises(ValueError, P('a/b').with_suffix, '.d/.')
self.assertRaises(ValueError, P('a/b').with_suffix,
(self.flavour.sep, 'd'))
def test_relative_to_common(self):
P = self.cls
p = P('a/b')
self.assertRaises(TypeError, p.relative_to)
self.assertRaises(TypeError, p.relative_to, b'a')
self.assertEqual(p.relative_to(P()), P('a/b'))
self.assertEqual(p.relative_to(''), P('a/b'))
self.assertEqual(p.relative_to(P('a')), P('b'))
self.assertEqual(p.relative_to('a'), P('b'))
self.assertEqual(p.relative_to('a/'), P('b'))
self.assertEqual(p.relative_to(P('a/b')), P())
self.assertEqual(p.relative_to('a/b'), P())
# With several args.
self.assertEqual(p.relative_to('a', 'b'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('c'))
self.assertRaises(ValueError, p.relative_to, P('a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('a/c'))
self.assertRaises(ValueError, p.relative_to, P('/a'))
p = P('/a/b')
self.assertEqual(p.relative_to(P('/')), P('a/b'))
self.assertEqual(p.relative_to('/'), P('a/b'))
self.assertEqual(p.relative_to(P('/a')), P('b'))
self.assertEqual(p.relative_to('/a'), P('b'))
self.assertEqual(p.relative_to('/a/'), P('b'))
self.assertEqual(p.relative_to(P('/a/b')), P())
self.assertEqual(p.relative_to('/a/b'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/b/c'))
self.assertRaises(ValueError, p.relative_to, P('/a/c'))
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('a'))
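    # Illustrative note: relative_to() is purely lexical and never inserts
    # '..' components, which is why "unrelated" paths raise ValueError:
    #
    #     >>> pathlib.PurePosixPath('/a/b').relative_to('/a/c')
    #     Traceback (most recent call last):
    #       ...
    #     ValueError: '/a/b' does not start with '/a/c'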
def test_pickling_common(self):
P = self.cls
p = P('/a/b')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertIs(pp.__class__, p.__class__)
self.assertEqual(pp, p)
self.assertEqual(hash(pp), hash(p))
self.assertEqual(str(pp), str(p))
class PurePosixPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePosixPath
def test_root(self):
P = self.cls
self.assertEqual(P('/a/b').root, '/')
self.assertEqual(P('///a/b').root, '/')
# POSIX special case for two leading slashes.
self.assertEqual(P('//a/b').root, '//')
def test_eq(self):
P = self.cls
self.assertNotEqual(P('a/b'), P('A/b'))
self.assertEqual(P('/a'), P('///a'))
self.assertNotEqual(P('/a'), P('//a'))
def test_as_uri(self):
P = self.cls
self.assertEqual(P('/').as_uri(), 'file:///')
self.assertEqual(P('/a/b.c').as_uri(), 'file:///a/b.c')
self.assertEqual(P('/a/b%#c').as_uri(), 'file:///a/b%25%23c')
def test_as_uri_non_ascii(self):
from urllib.parse import quote_from_bytes
P = self.cls
try:
os.fsencode('\xe9')
except UnicodeEncodeError:
self.skipTest("\\xe9 cannot be encoded to the filesystem encoding")
self.assertEqual(P('/a/b\xe9').as_uri(),
'file:///a/b' + quote_from_bytes(os.fsencode('\xe9')))
def test_match(self):
P = self.cls
self.assertFalse(P('A.py').match('a.PY'))
def test_is_absolute(self):
P = self.cls
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertTrue(P('/').is_absolute())
self.assertTrue(P('/a').is_absolute())
self.assertTrue(P('/a/b/').is_absolute())
self.assertTrue(P('//a').is_absolute())
self.assertTrue(P('//a/b').is_absolute())
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(False, P('/dev/con/PRN/NUL').is_reserved())
def test_join(self):
P = self.cls
p = P('//a')
pp = p.joinpath('b')
self.assertEqual(pp, P('//a/b'))
pp = P('/a').joinpath('//c')
self.assertEqual(pp, P('//c'))
pp = P('//a').joinpath('/c')
self.assertEqual(pp, P('/c'))
def test_div(self):
# Basically the same as joinpath().
P = self.cls
p = P('//a')
pp = p / 'b'
self.assertEqual(pp, P('//a/b'))
pp = P('/a') / '//c'
self.assertEqual(pp, P('//c'))
pp = P('//a') / '/c'
self.assertEqual(pp, P('/c'))
class PureWindowsPathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PureWindowsPath
equivalences = _BasePurePathTest.equivalences.copy()
equivalences.update({
'c:a': [ ('c:', 'a'), ('c:', 'a/'), ('/', 'c:', 'a') ],
'c:/a': [
('c:/', 'a'), ('c:', '/', 'a'), ('c:', '/a'),
('/z', 'c:/', 'a'), ('//x/y', 'c:/', 'a'),
],
'//a/b/': [ ('//a/b',) ],
'//a/b/c': [
('//a/b', 'c'), ('//a/b/', 'c'),
],
})
def test_str(self):
p = self.cls('a/b/c')
self.assertEqual(str(p), 'a\\b\\c')
p = self.cls('c:/a/b/c')
self.assertEqual(str(p), 'c:\\a\\b\\c')
p = self.cls('//a/b')
self.assertEqual(str(p), '\\\\a\\b\\')
p = self.cls('//a/b/c')
self.assertEqual(str(p), '\\\\a\\b\\c')
p = self.cls('//a/b/c/d')
self.assertEqual(str(p), '\\\\a\\b\\c\\d')
def test_str_subclass(self):
self._check_str_subclass('c:')
self._check_str_subclass('c:a')
self._check_str_subclass('c:a\\b.txt')
self._check_str_subclass('c:\\')
self._check_str_subclass('c:\\a')
self._check_str_subclass('c:\\a\\b.txt')
self._check_str_subclass('\\\\some\\share')
self._check_str_subclass('\\\\some\\share\\a')
self._check_str_subclass('\\\\some\\share\\a\\b.txt')
def test_eq(self):
P = self.cls
self.assertEqual(P('c:a/b'), P('c:a/b'))
self.assertEqual(P('c:a/b'), P('c:', 'a', 'b'))
self.assertNotEqual(P('c:a/b'), P('d:a/b'))
self.assertNotEqual(P('c:a/b'), P('c:/a/b'))
self.assertNotEqual(P('/a/b'), P('c:/a/b'))
# Case-insensitivity.
self.assertEqual(P('a/B'), P('A/b'))
self.assertEqual(P('C:a/B'), P('c:A/b'))
self.assertEqual(P('//Some/SHARE/a/B'), P('//somE/share/A/b'))
def test_as_uri(self):
P = self.cls
with self.assertRaises(ValueError):
P('/a/b').as_uri()
with self.assertRaises(ValueError):
P('c:a/b').as_uri()
self.assertEqual(P('c:/').as_uri(), 'file:///c:/')
self.assertEqual(P('c:/a/b.c').as_uri(), 'file:///c:/a/b.c')
self.assertEqual(P('c:/a/b%#c').as_uri(), 'file:///c:/a/b%25%23c')
self.assertEqual(P('c:/a/b\xe9').as_uri(), 'file:///c:/a/b%C3%A9')
self.assertEqual(P('//some/share/').as_uri(), 'file://some/share/')
self.assertEqual(P('//some/share/a/b.c').as_uri(),
'file://some/share/a/b.c')
self.assertEqual(P('//some/share/a/b%#c\xe9').as_uri(),
'file://some/share/a/b%25%23c%C3%A9')
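    # Illustrative note: as_uri() percent-encodes characters outside the
    # unreserved set, e.g. (hypothetical path):
    #
    #     >>> pathlib.PureWindowsPath('c:/a b').as_uri()
    #     'file:///c:/a%20b'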
def test_match_common(self):
P = self.cls
# Absolute patterns.
self.assertTrue(P('c:/b.py').match('/*.py'))
self.assertTrue(P('c:/b.py').match('c:*.py'))
self.assertTrue(P('c:/b.py').match('c:/*.py'))
self.assertFalse(P('d:/b.py').match('c:/*.py')) # wrong drive
self.assertFalse(P('b.py').match('/*.py'))
self.assertFalse(P('b.py').match('c:*.py'))
self.assertFalse(P('b.py').match('c:/*.py'))
self.assertFalse(P('c:b.py').match('/*.py'))
self.assertFalse(P('c:b.py').match('c:/*.py'))
self.assertFalse(P('/b.py').match('c:*.py'))
self.assertFalse(P('/b.py').match('c:/*.py'))
# UNC patterns.
self.assertTrue(P('//some/share/a.py').match('/*.py'))
self.assertTrue(P('//some/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//other/share/a.py').match('//some/share/*.py'))
self.assertFalse(P('//some/share/a/b.py').match('//some/share/*.py'))
# Case-insensitivity.
self.assertTrue(P('B.py').match('b.PY'))
self.assertTrue(P('c:/a/B.Py').match('C:/A/*.pY'))
self.assertTrue(P('//Some/Share/B.Py').match('//somE/sharE/*.pY'))
def test_ordering_common(self):
# Case-insensitivity.
def assertOrderedEqual(a, b):
self.assertLessEqual(a, b)
self.assertGreaterEqual(b, a)
P = self.cls
p = P('c:A/b')
q = P('C:a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
p = P('//some/Share/A/b')
q = P('//Some/SHARE/a/B')
assertOrderedEqual(p, q)
self.assertFalse(p < q)
self.assertFalse(p > q)
def test_parts(self):
P = self.cls
p = P('c:a/b')
parts = p.parts
self.assertEqual(parts, ('c:', 'a', 'b'))
p = P('c:/a/b')
parts = p.parts
self.assertEqual(parts, ('c:\\', 'a', 'b'))
p = P('//a/b/c/d')
parts = p.parts
self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))
def test_parent(self):
# Anchored
P = self.cls
p = P('z:a/b/c')
self.assertEqual(p.parent, P('z:a/b'))
self.assertEqual(p.parent.parent, P('z:a'))
self.assertEqual(p.parent.parent.parent, P('z:'))
self.assertEqual(p.parent.parent.parent.parent, P('z:'))
p = P('z:/a/b/c')
self.assertEqual(p.parent, P('z:/a/b'))
self.assertEqual(p.parent.parent, P('z:/a'))
self.assertEqual(p.parent.parent.parent, P('z:/'))
self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
p = P('//a/b/c/d')
self.assertEqual(p.parent, P('//a/b/c'))
self.assertEqual(p.parent.parent, P('//a/b'))
self.assertEqual(p.parent.parent.parent, P('//a/b'))
def test_parents(self):
# Anchored
P = self.cls
p = P('z:a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:a'))
self.assertEqual(par[1], P('z:'))
self.assertEqual(list(par), [P('z:a'), P('z:')])
with self.assertRaises(IndexError):
par[2]
p = P('z:/a/b/')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('z:/a'))
self.assertEqual(par[1], P('z:/'))
self.assertEqual(list(par), [P('z:/a'), P('z:/')])
with self.assertRaises(IndexError):
par[2]
p = P('//a/b/c/d')
par = p.parents
self.assertEqual(len(par), 2)
self.assertEqual(par[0], P('//a/b/c'))
self.assertEqual(par[1], P('//a/b'))
self.assertEqual(list(par), [P('//a/b/c'), P('//a/b')])
with self.assertRaises(IndexError):
par[2]
def test_drive(self):
P = self.cls
self.assertEqual(P('c:').drive, 'c:')
self.assertEqual(P('c:a/b').drive, 'c:')
self.assertEqual(P('c:/').drive, 'c:')
self.assertEqual(P('c:/a/b/').drive, 'c:')
self.assertEqual(P('//a/b').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/').drive, '\\\\a\\b')
self.assertEqual(P('//a/b/c/d').drive, '\\\\a\\b')
def test_root(self):
P = self.cls
self.assertEqual(P('c:').root, '')
self.assertEqual(P('c:a/b').root, '')
self.assertEqual(P('c:/').root, '\\')
self.assertEqual(P('c:/a/b/').root, '\\')
self.assertEqual(P('//a/b').root, '\\')
self.assertEqual(P('//a/b/').root, '\\')
self.assertEqual(P('//a/b/c/d').root, '\\')
def test_anchor(self):
P = self.cls
self.assertEqual(P('c:').anchor, 'c:')
self.assertEqual(P('c:a/b').anchor, 'c:')
self.assertEqual(P('c:/').anchor, 'c:\\')
self.assertEqual(P('c:/a/b/').anchor, 'c:\\')
self.assertEqual(P('//a/b').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/').anchor, '\\\\a\\b\\')
self.assertEqual(P('//a/b/c/d').anchor, '\\\\a\\b\\')
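    # Illustrative note: on Windows the anchor is the concatenation of the
    # drive and the root:
    #
    #     >>> p = pathlib.PureWindowsPath('c:/a/b')
    #     >>> p.drive, p.root, p.anchor
    #     ('c:', '\\', 'c:\\')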
def test_name(self):
P = self.cls
self.assertEqual(P('c:').name, '')
self.assertEqual(P('c:/').name, '')
self.assertEqual(P('c:a/b').name, 'b')
self.assertEqual(P('c:/a/b').name, 'b')
self.assertEqual(P('c:a/b.py').name, 'b.py')
self.assertEqual(P('c:/a/b.py').name, 'b.py')
self.assertEqual(P('//My.py/Share.php').name, '')
self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')
def test_suffix(self):
P = self.cls
self.assertEqual(P('c:').suffix, '')
self.assertEqual(P('c:/').suffix, '')
self.assertEqual(P('c:a/b').suffix, '')
self.assertEqual(P('c:/a/b').suffix, '')
self.assertEqual(P('c:a/b.py').suffix, '.py')
self.assertEqual(P('c:/a/b.py').suffix, '.py')
self.assertEqual(P('c:a/.hgrc').suffix, '')
self.assertEqual(P('c:/a/.hgrc').suffix, '')
self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffix, '')
self.assertEqual(P('//My.py/Share.php').suffix, '')
self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')
def test_suffixes(self):
P = self.cls
self.assertEqual(P('c:').suffixes, [])
self.assertEqual(P('c:/').suffixes, [])
self.assertEqual(P('c:a/b').suffixes, [])
self.assertEqual(P('c:/a/b').suffixes, [])
self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
self.assertEqual(P('c:a/.hgrc').suffixes, [])
self.assertEqual(P('c:/a/.hgrc').suffixes, [])
self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
self.assertEqual(P('//My.py/Share.php').suffixes, [])
self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
self.assertEqual(P('c:a/Some name. Ending with a dot.').suffixes, [])
self.assertEqual(P('c:/a/Some name. Ending with a dot.').suffixes, [])
def test_stem(self):
P = self.cls
self.assertEqual(P('c:').stem, '')
self.assertEqual(P('c:.').stem, '')
self.assertEqual(P('c:..').stem, '..')
self.assertEqual(P('c:/').stem, '')
self.assertEqual(P('c:a/b').stem, 'b')
self.assertEqual(P('c:a/b.py').stem, 'b')
self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
self.assertEqual(P('c:a/Some name. Ending with a dot.').stem,
'Some name. Ending with a dot.')
def test_with_name(self):
P = self.cls
self.assertEqual(P('c:a/b').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/b').with_name('d.xml'), P('c:/a/d.xml'))
self.assertEqual(P('c:a/Dot ending.').with_name('d.xml'), P('c:a/d.xml'))
self.assertEqual(P('c:/a/Dot ending.').with_name('d.xml'), P('c:/a/d.xml'))
self.assertRaises(ValueError, P('c:').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:/').with_name, 'd.xml')
self.assertRaises(ValueError, P('//My/Share').with_name, 'd.xml')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:e')
self.assertRaises(ValueError, P('c:a/b').with_name, 'd:/e')
self.assertRaises(ValueError, P('c:a/b').with_name, '//My/Share')
def test_with_suffix(self):
P = self.cls
self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
# Path doesn't have a "filename" component.
self.assertRaises(ValueError, P('').with_suffix, '.gz')
self.assertRaises(ValueError, P('.').with_suffix, '.gz')
self.assertRaises(ValueError, P('/').with_suffix, '.gz')
self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
# Invalid suffix.
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
def test_relative_to(self):
P = self.cls
p = P('C:Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:foO')), P('Bar'))
self.assertEqual(p.relative_to('c:foO'), P('Bar'))
self.assertEqual(p.relative_to('c:foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:foO/baR')), P())
self.assertEqual(p.relative_to('c:foO/baR'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P())
self.assertRaises(ValueError, p.relative_to, '')
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('Foo'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo/Baz'))
p = P('C:/Foo/Bar')
self.assertEqual(p.relative_to(P('c:')), P('/Foo/Bar'))
self.assertEqual(p.relative_to('c:'), P('/Foo/Bar'))
self.assertEqual(str(p.relative_to(P('c:'))), '\\Foo\\Bar')
self.assertEqual(str(p.relative_to('c:')), '\\Foo\\Bar')
self.assertEqual(p.relative_to(P('c:/')), P('Foo/Bar'))
self.assertEqual(p.relative_to('c:/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('c:/foO')), P('Bar'))
self.assertEqual(p.relative_to('c:/foO'), P('Bar'))
self.assertEqual(p.relative_to('c:/foO/'), P('Bar'))
self.assertEqual(p.relative_to(P('c:/foO/baR')), P())
self.assertEqual(p.relative_to('c:/foO/baR'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('C:/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Bar/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:/Foo/Baz'))
self.assertRaises(ValueError, p.relative_to, P('C:Foo'))
self.assertRaises(ValueError, p.relative_to, P('d:'))
self.assertRaises(ValueError, p.relative_to, P('d:/'))
self.assertRaises(ValueError, p.relative_to, P('/'))
self.assertRaises(ValueError, p.relative_to, P('/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//C/Foo'))
# UNC paths.
p = P('//Server/Share/Foo/Bar')
self.assertEqual(p.relative_to(P('//sErver/sHare')), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare'), P('Foo/Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/'), P('Foo/Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo')), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo'), P('Bar'))
self.assertEqual(p.relative_to('//sErver/sHare/Foo/'), P('Bar'))
self.assertEqual(p.relative_to(P('//sErver/sHare/Foo/Bar')), P())
self.assertEqual(p.relative_to('//sErver/sHare/Foo/Bar'), P())
# Unrelated paths.
self.assertRaises(ValueError, p.relative_to, P('/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('c:/Server/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//z/Share/Foo'))
self.assertRaises(ValueError, p.relative_to, P('//Server/z/Foo'))
def test_is_absolute(self):
P = self.cls
# Under NT, only paths with both a drive and a root are absolute.
self.assertFalse(P().is_absolute())
self.assertFalse(P('a').is_absolute())
self.assertFalse(P('a/b/').is_absolute())
self.assertFalse(P('/').is_absolute())
self.assertFalse(P('/a').is_absolute())
self.assertFalse(P('/a/b/').is_absolute())
self.assertFalse(P('c:').is_absolute())
self.assertFalse(P('c:a').is_absolute())
self.assertFalse(P('c:a/b/').is_absolute())
self.assertTrue(P('c:/').is_absolute())
self.assertTrue(P('c:/a').is_absolute())
self.assertTrue(P('c:/a/b/').is_absolute())
# UNC paths are absolute by definition.
self.assertTrue(P('//a/b').is_absolute())
self.assertTrue(P('//a/b/').is_absolute())
self.assertTrue(P('//a/b/c').is_absolute())
self.assertTrue(P('//a/b/c/d').is_absolute())
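    # Illustrative contrast with POSIX: a rooted but driveless path is not
    # absolute on Windows, while a UNC path always is:
    #
    #     >>> pathlib.PureWindowsPath('/a').is_absolute()
    #     False
    #     >>> pathlib.PureWindowsPath('//host/share').is_absolute()
    #     True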
def test_join(self):
P = self.cls
p = P('C:/a/b')
pp = p.joinpath('x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('/x/y')
self.assertEqual(pp, P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
pp = p.joinpath('D:x/y')
self.assertEqual(pp, P('D:x/y'))
pp = p.joinpath('D:/x/y')
self.assertEqual(pp, P('D:/x/y'))
pp = p.joinpath('//host/share/x/y')
self.assertEqual(pp, P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
pp = p.joinpath('c:x/y')
self.assertEqual(pp, P('C:/a/b/x/y'))
pp = p.joinpath('c:/x/y')
self.assertEqual(pp, P('C:/x/y'))
def test_div(self):
# Basically the same as joinpath().
P = self.cls
p = P('C:/a/b')
self.assertEqual(p / 'x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'x' / 'y', P('C:/a/b/x/y'))
self.assertEqual(p / '/x/y', P('C:/x/y'))
self.assertEqual(p / '/x' / 'y', P('C:/x/y'))
# Joining with a different drive => the first path is ignored, even
# if the second path is relative.
self.assertEqual(p / 'D:x/y', P('D:x/y'))
self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
# Joining with the same drive => the first path is appended to if
# the second path is relative.
self.assertEqual(p / 'c:x/y', P('C:/a/b/x/y'))
self.assertEqual(p / 'c:/x/y', P('C:/x/y'))
def test_is_reserved(self):
P = self.cls
self.assertIs(False, P('').is_reserved())
self.assertIs(False, P('/').is_reserved())
self.assertIs(False, P('/foo/bar').is_reserved())
self.assertIs(True, P('con').is_reserved())
self.assertIs(True, P('NUL').is_reserved())
self.assertIs(True, P('NUL.txt').is_reserved())
self.assertIs(True, P('com1').is_reserved())
self.assertIs(True, P('com9.bar').is_reserved())
self.assertIs(False, P('bar.com9').is_reserved())
self.assertIs(True, P('lpt1').is_reserved())
self.assertIs(True, P('lpt9.bar').is_reserved())
self.assertIs(False, P('bar.lpt9').is_reserved())
# Only the last component matters.
self.assertIs(False, P('c:/NUL/con/baz').is_reserved())
# UNC paths are never reserved.
self.assertIs(False, P('//my/share/nul/con/aux').is_reserved())
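    # Illustrative note: reservation is decided by the last component's base
    # name, so adding an extension does not un-reserve it, but a reserved
    # name earlier in the path is harmless:
    #
    #     >>> pathlib.PureWindowsPath('NUL.txt').is_reserved()
    #     True
    #     >>> pathlib.PureWindowsPath('c:/NUL/ok.txt').is_reserved()
    #     False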
class PurePathTest(_BasePurePathTest, unittest.TestCase):
cls = pathlib.PurePath
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.PureWindowsPath if os.name == 'nt' else pathlib.PurePosixPath)
def test_different_flavours_unequal(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
self.assertNotEqual(p, q)
def test_different_flavours_unordered(self):
p = pathlib.PurePosixPath('a')
q = pathlib.PureWindowsPath('a')
with self.assertRaises(TypeError):
p < q
with self.assertRaises(TypeError):
p <= q
with self.assertRaises(TypeError):
p > q
with self.assertRaises(TypeError):
p >= q
#
# Tests for the concrete classes.
#
# Make sure any symbolic links in the base test path are resolved.
BASE = os.path.realpath(TESTFN)
join = lambda *x: os.path.join(BASE, *x)
rel_join = lambda *x: os.path.join(TESTFN, *x)
only_nt = unittest.skipIf(os.name != 'nt',
'test requires a Windows-compatible system')
only_posix = unittest.skipIf(os.name == 'nt',
'test requires a POSIX-compatible system')
@only_posix
class PosixPathAsPureTest(PurePosixPathTest):
cls = pathlib.PosixPath
@only_nt
class WindowsPathAsPureTest(PureWindowsPathTest):
cls = pathlib.WindowsPath
def test_owner(self):
P = self.cls
with self.assertRaises(NotImplementedError):
P('c:/').owner()
def test_group(self):
P = self.cls
with self.assertRaises(NotImplementedError):
P('c:/').group()
class _BasePathTest(object):
"""Tests for the FS-accessing functionalities of the Path classes."""
# (BASE)
# |
# |-- brokenLink -> non-existing
# |-- dirA
# | `-- linkC -> ../dirB
# |-- dirB
# | |-- fileB
# | `-- linkD -> ../dirB
# |-- dirC
# | |-- dirD
# | | `-- fileD
# | `-- fileC
# |-- dirE # No permissions
# |-- fileA
# |-- linkA -> fileA
# |-- linkB -> dirB
# `-- brokenLinkLoop -> brokenLinkLoop
#
def setUp(self):
def cleanup():
os.chmod(join('dirE'), 0o777)
support.rmtree(BASE)
self.addCleanup(cleanup)
os.mkdir(BASE)
os.mkdir(join('dirA'))
os.mkdir(join('dirB'))
os.mkdir(join('dirC'))
os.mkdir(join('dirC', 'dirD'))
os.mkdir(join('dirE'))
with open(join('fileA'), 'wb') as f:
f.write(b"this is file A\n")
with open(join('dirB', 'fileB'), 'wb') as f:
f.write(b"this is file B\n")
with open(join('dirC', 'fileC'), 'wb') as f:
f.write(b"this is file C\n")
with open(join('dirC', 'dirD', 'fileD'), 'wb') as f:
f.write(b"this is file D\n")
os.chmod(join('dirE'), 0)
if support.can_symlink():
# Relative symlinks.
os.symlink('fileA', join('linkA'))
os.symlink('non-existing', join('brokenLink'))
self.dirlink('dirB', join('linkB'))
self.dirlink(os.path.join('..', 'dirB'), join('dirA', 'linkC'))
# This one goes upwards, creating a loop.
self.dirlink(os.path.join('..', 'dirB'), join('dirB', 'linkD'))
# Broken symlink (pointing to itself).
os.symlink('brokenLinkLoop', join('brokenLinkLoop'))
if os.name == 'nt':
# Workaround for http://bugs.python.org/issue13772.
def dirlink(self, src, dest):
os.symlink(src, dest, target_is_directory=True)
else:
def dirlink(self, src, dest):
os.symlink(src, dest)
def assertSame(self, path_a, path_b):
self.assertTrue(os.path.samefile(str(path_a), str(path_b)),
"%r and %r don't point to the same file" %
(path_a, path_b))
def assertFileNotFound(self, func, *args, **kwargs):
with self.assertRaises(FileNotFoundError) as cm:
func(*args, **kwargs)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def assertEqualNormCase(self, path_a, path_b):
self.assertEqual(os.path.normcase(path_a), os.path.normcase(path_b))
def _test_cwd(self, p):
q = self.cls(os.getcwd())
self.assertEqual(p, q)
self.assertEqualNormCase(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_cwd(self):
p = self.cls.cwd()
self._test_cwd(p)
def _test_home(self, p):
q = self.cls(os.path.expanduser('~'))
self.assertEqual(p, q)
self.assertEqualNormCase(str(p), str(q))
self.assertIs(type(p), type(q))
self.assertTrue(p.is_absolute())
def test_home(self):
with support.EnvironmentVarGuard() as env:
self._test_home(self.cls.home())
env.clear()
env['USERPROFILE'] = os.path.join(BASE, 'userprofile')
self._test_home(self.cls.home())
            # bpo-38883: `HOME` is ignored when set on Windows.
env['HOME'] = os.path.join(BASE, 'home')
self._test_home(self.cls.home())
def test_samefile(self):
fileA_path = os.path.join(BASE, 'fileA')
fileB_path = os.path.join(BASE, 'dirB', 'fileB')
p = self.cls(fileA_path)
pp = self.cls(fileA_path)
q = self.cls(fileB_path)
self.assertTrue(p.samefile(fileA_path))
self.assertTrue(p.samefile(pp))
self.assertFalse(p.samefile(fileB_path))
self.assertFalse(p.samefile(q))
# Test the non-existent file case
non_existent = os.path.join(BASE, 'foo')
r = self.cls(non_existent)
self.assertRaises(FileNotFoundError, p.samefile, r)
self.assertRaises(FileNotFoundError, p.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, p)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
self.assertRaises(FileNotFoundError, r.samefile, r)
self.assertRaises(FileNotFoundError, r.samefile, non_existent)
def test_empty_path(self):
# The empty path points to '.'
p = self.cls('')
self.assertEqual(p.stat(), os.stat('.'))
def test_expanduser_common(self):
P = self.cls
p = P('~')
self.assertEqual(p.expanduser(), P(os.path.expanduser('~')))
p = P('foo')
self.assertEqual(p.expanduser(), p)
p = P('/~')
self.assertEqual(p.expanduser(), p)
p = P('../~')
self.assertEqual(p.expanduser(), p)
p = P(P('').absolute().anchor) / '~'
self.assertEqual(p.expanduser(), p)
def test_exists(self):
P = self.cls
p = P(BASE)
self.assertIs(True, p.exists())
self.assertIs(True, (p / 'dirA').exists())
self.assertIs(True, (p / 'fileA').exists())
self.assertIs(False, (p / 'fileA' / 'bah').exists())
if support.can_symlink():
self.assertIs(True, (p / 'linkA').exists())
self.assertIs(True, (p / 'linkB').exists())
self.assertIs(True, (p / 'linkB' / 'fileB').exists())
self.assertIs(False, (p / 'linkA' / 'bah').exists())
self.assertIs(False, (p / 'foo').exists())
self.assertIs(False, P('/xyzzy').exists())
self.assertIs(False, P(BASE + '\udfff').exists())
self.assertIs(False, P(BASE + '\x00').exists())
def test_open_common(self):
p = self.cls(BASE)
with (p / 'fileA').open('r') as f:
self.assertIsInstance(f, io.TextIOBase)
self.assertEqual(f.read(), "this is file A\n")
with (p / 'fileA').open('rb') as f:
self.assertIsInstance(f, io.BufferedIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
with (p / 'fileA').open('rb', buffering=0) as f:
self.assertIsInstance(f, io.RawIOBase)
self.assertEqual(f.read().strip(), b"this is file A")
def test_read_write_bytes(self):
p = self.cls(BASE)
(p / 'fileA').write_bytes(b'abcdefg')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
# Check that trying to write str does not truncate the file.
self.assertRaises(TypeError, (p / 'fileA').write_bytes, 'somestr')
self.assertEqual((p / 'fileA').read_bytes(), b'abcdefg')
def test_read_write_text(self):
p = self.cls(BASE)
(p / 'fileA').write_text('äbcdefg', encoding='latin-1')
self.assertEqual((p / 'fileA').read_text(
encoding='utf-8', errors='ignore'), 'bcdefg')
# Check that trying to write bytes does not truncate the file.
self.assertRaises(TypeError, (p / 'fileA').write_text, b'somebytes')
self.assertEqual((p / 'fileA').read_text(encoding='latin-1'), 'äbcdefg')
def test_iterdir(self):
P = self.cls
p = P(BASE)
it = p.iterdir()
paths = set(it)
expected = ['dirA', 'dirB', 'dirC', 'dirE', 'fileA']
if support.can_symlink():
expected += ['linkA', 'linkB', 'brokenLink', 'brokenLinkLoop']
self.assertEqual(paths, { P(BASE, q) for q in expected })
@support.skip_unless_symlink
def test_iterdir_symlink(self):
# __iter__ on a symlink to a directory.
P = self.cls
p = P(BASE, 'linkB')
paths = set(p.iterdir())
expected = { P(BASE, 'linkB', q) for q in ['fileB', 'linkD'] }
self.assertEqual(paths, expected)
def test_iterdir_nodir(self):
# __iter__ on something that is not a directory.
p = self.cls(BASE, 'fileA')
with self.assertRaises(OSError) as cm:
next(p.iterdir())
# ENOENT or EINVAL under Windows, ENOTDIR otherwise
# (see issue #12802).
self.assertIn(cm.exception.errno, (errno.ENOTDIR,
errno.ENOENT, errno.EINVAL))
def test_glob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.glob("fileA")
self.assertIsInstance(it, collections.abc.Iterator)
_check(it, ["fileA"])
_check(p.glob("fileB"), [])
_check(p.glob("dir*/file*"), ["dirB/fileB", "dirC/fileC"])
if not support.can_symlink():
_check(p.glob("*A"), ['dirA', 'fileA'])
else:
_check(p.glob("*A"), ['dirA', 'fileA', 'linkA'])
if not support.can_symlink():
_check(p.glob("*B/*"), ['dirB/fileB'])
else:
_check(p.glob("*B/*"), ['dirB/fileB', 'dirB/linkD',
'linkB/fileB', 'linkB/linkD'])
if not support.can_symlink():
_check(p.glob("*/fileB"), ['dirB/fileB'])
else:
_check(p.glob("*/fileB"), ['dirB/fileB', 'linkB/fileB'])
def test_rglob_common(self):
def _check(glob, expected):
self.assertEqual(set(glob), { P(BASE, q) for q in expected })
P = self.cls
p = P(BASE)
it = p.rglob("fileA")
self.assertIsInstance(it, collections.abc.Iterator)
_check(it, ["fileA"])
_check(p.rglob("fileB"), ["dirB/fileB"])
_check(p.rglob("*/fileA"), [])
if not support.can_symlink():
_check(p.rglob("*/fileB"), ["dirB/fileB"])
else:
_check(p.rglob("*/fileB"), ["dirB/fileB", "dirB/linkD/fileB",
"linkB/fileB", "dirA/linkC/fileB"])
_check(p.rglob("file*"), ["fileA", "dirB/fileB",
"dirC/fileC", "dirC/dirD/fileD"])
p = P(BASE, "dirC")
_check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"])
_check(p.rglob("*/*"), ["dirC/dirD/fileD"])
@support.skip_unless_symlink
def test_rglob_symlink_loop(self):
# Don't get fooled by symlink loops (Issue #26012).
P = self.cls
p = P(BASE)
given = set(p.rglob('*'))
expect = {'brokenLink',
'dirA', 'dirA/linkC',
'dirB', 'dirB/fileB', 'dirB/linkD',
'dirC', 'dirC/dirD', 'dirC/dirD/fileD', 'dirC/fileC',
'dirE',
'fileA',
'linkA',
'linkB',
'brokenLinkLoop',
}
self.assertEqual(given, {p / x for x in expect})
def test_glob_many_open_files(self):
depth = 30
P = self.cls
base = P(BASE) / 'deep'
p = P(base, *(['d']*depth))
p.mkdir(parents=True)
pattern = '/'.join(['*'] * depth)
iters = [base.glob(pattern) for j in range(100)]
for it in iters:
self.assertEqual(next(it), p)
iters = [base.rglob('d') for j in range(100)]
p = base
for i in range(depth):
p = p / 'd'
for it in iters:
self.assertEqual(next(it), p)
def test_glob_dotdot(self):
# ".." is not special in globs.
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("..")), { P(BASE, "..") })
self.assertEqual(set(p.glob("dirA/../file*")), { P(BASE, "dirA/../fileA") })
self.assertEqual(set(p.glob("../xyzzy")), set())
def _check_resolve(self, p, expected, strict=True):
q = p.resolve(strict)
self.assertEqual(q, expected)
# This can be used to check both relative and absolute resolutions.
_check_resolve_relative = _check_resolve_absolute = _check_resolve
@support.skip_unless_symlink
def test_resolve_common(self):
P = self.cls
p = P(BASE, 'foo')
with self.assertRaises(OSError) as cm:
p.resolve(strict=True)
self.assertEqual(cm.exception.errno, errno.ENOENT)
# Non-strict
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.join(BASE, 'foo'))
p = P(BASE, 'foo', 'in', 'spam')
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.join(BASE, 'foo', 'in', 'spam'))
p = P(BASE, '..', 'foo', 'in', 'spam')
self.assertEqualNormCase(str(p.resolve(strict=False)),
os.path.abspath(os.path.join('foo', 'in', 'spam')))
# These are all relative symlinks.
p = P(BASE, 'dirB', 'fileB')
self._check_resolve_relative(p, p)
p = P(BASE, 'linkA')
self._check_resolve_relative(p, P(BASE, 'fileA'))
p = P(BASE, 'dirA', 'linkC', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
p = P(BASE, 'dirB', 'linkD', 'fileB')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB'))
# Non-strict
p = P(BASE, 'dirA', 'linkC', 'fileB', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'fileB', 'foo', 'in',
'spam'), False)
p = P(BASE, 'dirA', 'linkC', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
# In Windows, if linkY points to dirB, 'dirA\linkY\..'
# resolves to 'dirA' without resolving linkY first.
self._check_resolve_relative(p, P(BASE, 'dirA', 'foo', 'in',
'spam'), False)
else:
# In Posix, if linkY points to dirB, 'dirA/linkY/..'
# resolves to 'dirB/..' first before resolving to parent of dirB.
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False)
# Now create absolute symlinks.
d = support._longpath(tempfile.mkdtemp(suffix='-dirD', dir=os.getcwd()))
self.addCleanup(support.rmtree, d)
os.symlink(os.path.join(d), join('dirA', 'linkX'))
os.symlink(join('dirB'), os.path.join(d, 'linkY'))
p = P(BASE, 'dirA', 'linkX', 'linkY', 'fileB')
self._check_resolve_absolute(p, P(BASE, 'dirB', 'fileB'))
# Non-strict
p = P(BASE, 'dirA', 'linkX', 'linkY', 'foo', 'in', 'spam')
self._check_resolve_relative(p, P(BASE, 'dirB', 'foo', 'in', 'spam'),
False)
p = P(BASE, 'dirA', 'linkX', 'linkY', '..', 'foo', 'in', 'spam')
if os.name == 'nt':
# In Windows, if linkY points to dirB, 'dirA\linkY\..'
# resolves to 'dirA' without resolving linkY first.
self._check_resolve_relative(p, P(d, 'foo', 'in', 'spam'), False)
else:
# In Posix, if linkY points to dirB, 'dirA/linkY/..'
# resolves to 'dirB/..' first before resolving to parent of dirB.
self._check_resolve_relative(p, P(BASE, 'foo', 'in', 'spam'), False)
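    # Illustrative note: the divergence above is between symlink-aware and
    # purely lexical '..' handling; os.path.normpath() is always lexical:
    #
    #     >>> os.path.normpath('dirA/linkC/..')
    #     'dirA'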
@support.skip_unless_symlink
def test_resolve_dot(self):
# See https://bitbucket.org/pitrou/pathlib/issue/9/pathresolve-fails-on-complex-symlinks
p = self.cls(BASE)
self.dirlink('.', join('0'))
self.dirlink(os.path.join('0', '0'), join('1'))
self.dirlink(os.path.join('1', '1'), join('2'))
q = p / '2'
self.assertEqual(q.resolve(strict=True), p)
r = q / '3' / '4'
self.assertRaises(FileNotFoundError, r.resolve, strict=True)
# Non-strict
self.assertEqual(r.resolve(strict=False), p / '3' / '4')
def test_with(self):
p = self.cls(BASE)
it = p.iterdir()
it2 = p.iterdir()
next(it2)
with p:
pass
# I/O operation on closed path.
self.assertRaises(ValueError, next, it)
self.assertRaises(ValueError, next, it2)
self.assertRaises(ValueError, p.open)
self.assertRaises(ValueError, p.resolve)
self.assertRaises(ValueError, p.absolute)
self.assertRaises(ValueError, p.__enter__)
def test_chmod(self):
p = self.cls(BASE) / 'fileA'
mode = p.stat().st_mode
# Clear writable bit.
new_mode = mode & ~0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# Set writable bit.
new_mode = mode | 0o222
p.chmod(new_mode)
self.assertEqual(p.stat().st_mode, new_mode)
# XXX also need a test for lchmod.
def test_stat(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(p.stat(), st)
# Change file mode by flipping write bit.
p.chmod(st.st_mode ^ 0o222)
self.addCleanup(p.chmod, st.st_mode)
self.assertNotEqual(p.stat(), st)
@support.skip_unless_symlink
def test_lstat(self):
        p = self.cls(BASE) / 'linkA'
st = p.stat()
self.assertNotEqual(st, p.lstat())
def test_lstat_nosymlink(self):
p = self.cls(BASE) / 'fileA'
st = p.stat()
self.assertEqual(st, p.lstat())
@unittest.skipUnless(pwd, "the pwd module is needed for this test")
def test_owner(self):
p = self.cls(BASE) / 'fileA'
uid = p.stat().st_uid
try:
name = pwd.getpwuid(uid).pw_name
except KeyError:
self.skipTest(
"user %d doesn't have an entry in the system database" % uid)
self.assertEqual(name, p.owner())
@unittest.skipUnless(grp, "the grp module is needed for this test")
def test_group(self):
p = self.cls(BASE) / 'fileA'
gid = p.stat().st_gid
try:
name = grp.getgrgid(gid).gr_name
except KeyError:
self.skipTest(
"group %d doesn't have an entry in the system database" % gid)
self.assertEqual(name, p.group())
def test_unlink(self):
p = self.cls(BASE) / 'fileA'
p.unlink()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
def test_unlink_missing_ok(self):
p = self.cls(BASE) / 'fileAAA'
self.assertFileNotFound(p.unlink)
p.unlink(missing_ok=True)
def test_rmdir(self):
p = self.cls(BASE) / 'dirA'
for q in p.iterdir():
q.unlink()
p.rmdir()
self.assertFileNotFound(p.stat)
self.assertFileNotFound(p.unlink)
@unittest.skipUnless(hasattr(os, "link"), "os.link() is not present")
def test_link_to(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# linking to another path.
q = P / 'dirA' / 'fileAA'
try:
p.link_to(q)
except PermissionError as e:
self.skipTest('os.link(): %s' % e)
self.assertEqual(q.stat().st_size, size)
self.assertEqual(os.path.samefile(p, q), True)
self.assertTrue(p.stat)
# Linking to a str of a relative path.
r = rel_join('fileAAA')
q.link_to(r)
self.assertEqual(os.stat(r).st_size, size)
self.assertTrue(q.stat)
@unittest.skipIf(hasattr(os, "link"), "os.link() is present")
def test_link_to_not_implemented(self):
P = self.cls(BASE)
p = P / 'fileA'
# linking to another path.
q = P / 'dirA' / 'fileAA'
with self.assertRaises(NotImplementedError):
p.link_to(q)
def test_rename(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Renaming to another path.
q = P / 'dirA' / 'fileAA'
renamed_p = p.rename(q)
self.assertEqual(renamed_p, q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Renaming to a str of a relative path.
r = rel_join('fileAAA')
renamed_q = q.rename(r)
self.assertEqual(renamed_q, self.cls(r))
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_replace(self):
P = self.cls(BASE)
p = P / 'fileA'
size = p.stat().st_size
# Replacing a non-existing path.
q = P / 'dirA' / 'fileAA'
replaced_p = p.replace(q)
self.assertEqual(replaced_p, q)
self.assertEqual(q.stat().st_size, size)
self.assertFileNotFound(p.stat)
# Replacing another (existing) path.
r = rel_join('dirB', 'fileB')
replaced_q = q.replace(r)
self.assertEqual(replaced_q, self.cls(r))
self.assertEqual(os.stat(r).st_size, size)
self.assertFileNotFound(q.stat)
def test_touch_common(self):
P = self.cls(BASE)
p = P / 'newfileA'
self.assertFalse(p.exists())
p.touch()
self.assertTrue(p.exists())
st = p.stat()
old_mtime = st.st_mtime
old_mtime_ns = st.st_mtime_ns
# Rewind the mtime sufficiently far in the past to work around
# filesystem-specific timestamp granularity.
os.utime(str(p), (old_mtime - 10, old_mtime - 10))
# The file mtime should be refreshed by calling touch() again.
p.touch()
st = p.stat()
self.assertGreaterEqual(st.st_mtime_ns, old_mtime_ns)
self.assertGreaterEqual(st.st_mtime, old_mtime)
# Now with exist_ok=False.
p = P / 'newfileB'
self.assertFalse(p.exists())
p.touch(mode=0o700, exist_ok=False)
self.assertTrue(p.exists())
self.assertRaises(OSError, p.touch, exist_ok=False)
def test_touch_nochange(self):
P = self.cls(BASE)
p = P / 'fileA'
p.touch()
with p.open('rb') as f:
self.assertEqual(f.read().strip(), b"this is file A")
def test_mkdir(self):
P = self.cls(BASE)
p = P / 'newdirA'
self.assertFalse(p.exists())
p.mkdir()
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_parents(self):
# Creating a chain of directories.
p = self.cls(BASE, 'newdirB', 'newdirC')
self.assertFalse(p.exists())
with self.assertRaises(OSError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.ENOENT)
p.mkdir(parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(OSError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
# Test `mode` arg.
mode = stat.S_IMODE(p.stat().st_mode) # Default mode.
p = self.cls(BASE, 'newdirD', 'newdirE')
p.mkdir(0o555, parents=True)
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
if os.name != 'nt':
# The directory's permissions follow the mode argument.
self.assertEqual(stat.S_IMODE(p.stat().st_mode), 0o7555 & mode)
# The parent's permissions follow the default process settings.
self.assertEqual(stat.S_IMODE(p.parent.stat().st_mode), mode)
def test_mkdir_exist_ok(self):
p = self.cls(BASE, 'dirB')
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
self.assertTrue(p.is_dir())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_with_parent(self):
p = self.cls(BASE, 'dirC')
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
p = p / 'newdirC'
p.mkdir(parents=True)
st_ctime_first = p.stat().st_ctime
self.assertTrue(p.exists())
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
p.mkdir(parents=True, exist_ok=True)
self.assertTrue(p.exists())
self.assertEqual(p.stat().st_ctime, st_ctime_first)
def test_mkdir_exist_ok_root(self):
# Issue #25803: A drive root could raise PermissionError on Windows.
self.cls('/').resolve().mkdir(exist_ok=True)
self.cls('/').resolve().mkdir(parents=True, exist_ok=True)
@only_nt # XXX: not sure how to test this on POSIX.
def test_mkdir_with_unknown_drive(self):
for d in 'ZYXWVUTSRQPONMLKJIHGFEDCBA':
p = self.cls(d + ':\\')
if not p.is_dir():
break
else:
self.skipTest("cannot find a drive that doesn't exist")
with self.assertRaises(OSError):
(p / 'child' / 'path').mkdir(parents=True)
def test_mkdir_with_child_file(self):
p = self.cls(BASE, 'dirB', 'fileB')
self.assertTrue(p.exists())
# An exception is raised when the last path component is an existing
# regular file, regardless of whether exist_ok is true or not.
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(parents=True, exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_no_parents_file(self):
p = self.cls(BASE, 'fileA')
self.assertTrue(p.exists())
# An exception is raised when the last path component is an existing
# regular file, regardless of whether exist_ok is true or not.
with self.assertRaises(FileExistsError) as cm:
p.mkdir()
self.assertEqual(cm.exception.errno, errno.EEXIST)
with self.assertRaises(FileExistsError) as cm:
p.mkdir(exist_ok=True)
self.assertEqual(cm.exception.errno, errno.EEXIST)
def test_mkdir_concurrent_parent_creation(self):
for pattern_num in range(32):
p = self.cls(BASE, 'dirCPC%d' % pattern_num)
self.assertFalse(p.exists())
def my_mkdir(path, mode=0o777):
path = str(path)
# Emulate another process that would create the directory
# just before we try to create it ourselves. We do it
# in all possible pattern combinations, assuming that this
# function is called at most 5 times (dirCPC/dir1/dir2,
# dirCPC/dir1, dirCPC, dirCPC/dir1, dirCPC/dir1/dir2).
if pattern.pop():
os.mkdir(path, mode) # From another process.
concurrently_created.add(path)
os.mkdir(path, mode) # Our real call.
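            # Each of the five bits in pattern_num decides whether one of the
            # (at most five) my_mkdir() calls is raced by the emulated
            # concurrent process; pattern is consumed back-to-front via pop().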
pattern = [bool(pattern_num & (1 << n)) for n in range(5)]
concurrently_created = set()
p12 = p / 'dir1' / 'dir2'
try:
with mock.patch("pathlib._normal_accessor.mkdir", my_mkdir):
p12.mkdir(parents=True, exist_ok=False)
except FileExistsError:
self.assertIn(str(p12), concurrently_created)
else:
self.assertNotIn(str(p12), concurrently_created)
self.assertTrue(p.exists())
@support.skip_unless_symlink
def test_symlink_to(self):
P = self.cls(BASE)
target = P / 'fileA'
# Symlinking a path target.
link = P / 'dirA' / 'linkAA'
link.symlink_to(target)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
# Symlinking a str target.
link = P / 'dirA' / 'linkAAA'
link.symlink_to(str(target))
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertFalse(link.is_dir())
# Symlinking to a directory.
target = P / 'dirB'
link = P / 'dirA' / 'linkAAAA'
link.symlink_to(target, target_is_directory=True)
self.assertEqual(link.stat(), target.stat())
self.assertNotEqual(link.lstat(), target.stat())
self.assertTrue(link.is_dir())
self.assertTrue(list(link.iterdir()))
def test_is_dir(self):
P = self.cls(BASE)
self.assertTrue((P / 'dirA').is_dir())
self.assertFalse((P / 'fileA').is_dir())
self.assertFalse((P / 'non-existing').is_dir())
self.assertFalse((P / 'fileA' / 'bah').is_dir())
if support.can_symlink():
self.assertFalse((P / 'linkA').is_dir())
self.assertTrue((P / 'linkB').is_dir())
            self.assertFalse((P / 'brokenLink').is_dir())
self.assertIs((P / 'dirA\udfff').is_dir(), False)
self.assertIs((P / 'dirA\x00').is_dir(), False)
def test_is_file(self):
P = self.cls(BASE)
self.assertTrue((P / 'fileA').is_file())
self.assertFalse((P / 'dirA').is_file())
self.assertFalse((P / 'non-existing').is_file())
self.assertFalse((P / 'fileA' / 'bah').is_file())
if support.can_symlink():
self.assertTrue((P / 'linkA').is_file())
self.assertFalse((P / 'linkB').is_file())
            self.assertFalse((P / 'brokenLink').is_file())
self.assertIs((P / 'fileA\udfff').is_file(), False)
self.assertIs((P / 'fileA\x00').is_file(), False)
@only_posix
def test_is_mount(self):
P = self.cls(BASE)
R = self.cls('/') # TODO: Work out Windows.
self.assertFalse((P / 'fileA').is_mount())
self.assertFalse((P / 'dirA').is_mount())
self.assertFalse((P / 'non-existing').is_mount())
self.assertFalse((P / 'fileA' / 'bah').is_mount())
self.assertTrue(R.is_mount())
if support.can_symlink():
self.assertFalse((P / 'linkA').is_mount())
self.assertIs(self.cls('/\udfff').is_mount(), False)
self.assertIs(self.cls('/\x00').is_mount(), False)
def test_is_symlink(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_symlink())
self.assertFalse((P / 'dirA').is_symlink())
self.assertFalse((P / 'non-existing').is_symlink())
self.assertFalse((P / 'fileA' / 'bah').is_symlink())
if support.can_symlink():
self.assertTrue((P / 'linkA').is_symlink())
self.assertTrue((P / 'linkB').is_symlink())
            self.assertTrue((P / 'brokenLink').is_symlink())
        self.assertIs((P / 'fileA\udfff').is_symlink(), False)
        self.assertIs((P / 'fileA\x00').is_symlink(), False)
        if support.can_symlink():
            self.assertIs((P / 'linkA\udfff').is_symlink(), False)
            self.assertIs((P / 'linkA\x00').is_symlink(), False)
def test_is_fifo_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_fifo())
self.assertFalse((P / 'dirA').is_fifo())
self.assertFalse((P / 'non-existing').is_fifo())
self.assertFalse((P / 'fileA' / 'bah').is_fifo())
self.assertIs((P / 'fileA\udfff').is_fifo(), False)
self.assertIs((P / 'fileA\x00').is_fifo(), False)
@unittest.skipUnless(hasattr(os, "mkfifo"), "os.mkfifo() required")
def test_is_fifo_true(self):
P = self.cls(BASE, 'myfifo')
try:
os.mkfifo(str(P))
except PermissionError as e:
self.skipTest('os.mkfifo(): %s' % e)
self.assertTrue(P.is_fifo())
self.assertFalse(P.is_socket())
self.assertFalse(P.is_file())
self.assertIs(self.cls(BASE, 'myfifo\udfff').is_fifo(), False)
self.assertIs(self.cls(BASE, 'myfifo\x00').is_fifo(), False)
def test_is_socket_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_socket())
self.assertFalse((P / 'dirA').is_socket())
self.assertFalse((P / 'non-existing').is_socket())
self.assertFalse((P / 'fileA' / 'bah').is_socket())
self.assertIs((P / 'fileA\udfff').is_socket(), False)
self.assertIs((P / 'fileA\x00').is_socket(), False)
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
def test_is_socket_true(self):
P = self.cls(BASE, 'mysock')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(sock.close)
try:
sock.bind(str(P))
except OSError as e:
if (isinstance(e, PermissionError) or
"AF_UNIX path too long" in str(e)):
self.skipTest("cannot bind Unix socket: " + str(e))
self.assertTrue(P.is_socket())
self.assertFalse(P.is_fifo())
self.assertFalse(P.is_file())
self.assertIs(self.cls(BASE, 'mysock\udfff').is_socket(), False)
self.assertIs(self.cls(BASE, 'mysock\x00').is_socket(), False)
def test_is_block_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_block_device())
self.assertFalse((P / 'dirA').is_block_device())
self.assertFalse((P / 'non-existing').is_block_device())
self.assertFalse((P / 'fileA' / 'bah').is_block_device())
self.assertIs((P / 'fileA\udfff').is_block_device(), False)
self.assertIs((P / 'fileA\x00').is_block_device(), False)
def test_is_char_device_false(self):
P = self.cls(BASE)
self.assertFalse((P / 'fileA').is_char_device())
self.assertFalse((P / 'dirA').is_char_device())
self.assertFalse((P / 'non-existing').is_char_device())
self.assertFalse((P / 'fileA' / 'bah').is_char_device())
self.assertIs((P / 'fileA\udfff').is_char_device(), False)
self.assertIs((P / 'fileA\x00').is_char_device(), False)
def test_is_char_device_true(self):
# Under Unix, /dev/null should generally be a char device.
P = self.cls('/dev/null')
if not P.exists():
self.skipTest("/dev/null required")
self.assertTrue(P.is_char_device())
self.assertFalse(P.is_block_device())
self.assertFalse(P.is_file())
self.assertIs(self.cls('/dev/null\udfff').is_char_device(), False)
self.assertIs(self.cls('/dev/null\x00').is_char_device(), False)
def test_pickling_common(self):
p = self.cls(BASE, 'fileA')
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(p, proto)
pp = pickle.loads(dumped)
self.assertEqual(pp.stat(), p.stat())
def test_parts_interning(self):
P = self.cls
p = P('/usr/bin/foo')
q = P('/usr/local/bin')
# 'usr'
self.assertIs(p.parts[1], q.parts[1])
# 'bin'
self.assertIs(p.parts[2], q.parts[3])
def _check_complex_symlinks(self, link0_target):
# Test solving a non-looping chain of symlinks (issue #19887).
P = self.cls(BASE)
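        # Chain layout: link1 -> link0/link0, link2 -> link1/link1,
        # link3 -> link2/link2, and finally link0 -> link0_target, so
        # resolving link3 has to unwind the whole chain back to BASE.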
self.dirlink(os.path.join('link0', 'link0'), join('link1'))
self.dirlink(os.path.join('link1', 'link1'), join('link2'))
self.dirlink(os.path.join('link2', 'link2'), join('link3'))
self.dirlink(link0_target, join('link0'))
# Resolve absolute paths.
p = (P / 'link0').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link1').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link2').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = (P / 'link3').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
# Resolve relative paths.
old_path = os.getcwd()
os.chdir(BASE)
try:
p = self.cls('link0').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link1').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link2').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
p = self.cls('link3').resolve()
self.assertEqual(p, P)
self.assertEqualNormCase(str(p), BASE)
finally:
os.chdir(old_path)
@support.skip_unless_symlink
def test_complex_symlinks_absolute(self):
self._check_complex_symlinks(BASE)
@support.skip_unless_symlink
def test_complex_symlinks_relative(self):
self._check_complex_symlinks('.')
@support.skip_unless_symlink
def test_complex_symlinks_relative_dot_dot(self):
self._check_complex_symlinks(os.path.join('dirA', '..'))
class PathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.Path
def test_concrete_class(self):
p = self.cls('a')
self.assertIs(type(p),
pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath)
def test_unsupported_flavour(self):
if os.name == 'nt':
self.assertRaises(NotImplementedError, pathlib.PosixPath)
else:
self.assertRaises(NotImplementedError, pathlib.WindowsPath)
def test_glob_empty_pattern(self):
p = self.cls()
with self.assertRaisesRegex(ValueError, 'Unacceptable pattern'):
list(p.glob(''))
@only_posix
class PosixPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.PosixPath
def _check_symlink_loop(self, *args, strict=True):
path = self.cls(*args)
with self.assertRaises(RuntimeError):
print(path.resolve(strict))
def test_open_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
with (p / 'new_file').open('wb'):
pass
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
with (p / 'other_new_file').open('wb'):
pass
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
def test_touch_mode(self):
old_mask = os.umask(0)
self.addCleanup(os.umask, old_mask)
p = self.cls(BASE)
(p / 'new_file').touch()
st = os.stat(join('new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o666)
os.umask(0o022)
(p / 'other_new_file').touch()
st = os.stat(join('other_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o644)
(p / 'masked_new_file').touch(mode=0o750)
st = os.stat(join('masked_new_file'))
self.assertEqual(stat.S_IMODE(st.st_mode), 0o750)
@support.skip_unless_symlink
def test_resolve_loop(self):
# Loops with relative symlinks.
os.symlink('linkX/inside', join('linkX'))
self._check_symlink_loop(BASE, 'linkX')
os.symlink('linkY', join('linkY'))
self._check_symlink_loop(BASE, 'linkY')
os.symlink('linkZ/../linkZ', join('linkZ'))
self._check_symlink_loop(BASE, 'linkZ')
# Non-strict
self._check_symlink_loop(BASE, 'linkZ', 'foo', strict=False)
# Loops with absolute symlinks.
os.symlink(join('linkU/inside'), join('linkU'))
self._check_symlink_loop(BASE, 'linkU')
os.symlink(join('linkV'), join('linkV'))
self._check_symlink_loop(BASE, 'linkV')
os.symlink(join('linkW/../linkW'), join('linkW'))
self._check_symlink_loop(BASE, 'linkW')
# Non-strict
self._check_symlink_loop(BASE, 'linkW', 'foo', strict=False)
def test_glob(self):
P = self.cls
p = P(BASE)
given = set(p.glob("FILEa"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.glob("FILEa*")), set())
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
given = set(p.rglob("FILEd"))
expect = set() if not support.fs_is_case_insensitive(BASE) else given
self.assertEqual(given, expect)
self.assertEqual(set(p.rglob("FILEd*")), set())
@unittest.skipUnless(hasattr(pwd, 'getpwall'),
'pwd module does not expose getpwall()')
def test_expanduser(self):
P = self.cls
support.import_module('pwd')
import pwd
pwdent = pwd.getpwuid(os.getuid())
username = pwdent.pw_name
userhome = pwdent.pw_dir.rstrip('/') or '/'
# Find arbitrary different user (if exists).
for pwdent in pwd.getpwall():
othername = pwdent.pw_name
otherhome = pwdent.pw_dir.rstrip('/')
if othername != username and otherhome:
break
else:
othername = username
otherhome = userhome
p1 = P('~/Documents')
p2 = P('~' + username + '/Documents')
p3 = P('~' + othername + '/Documents')
p4 = P('../~' + username + '/Documents')
p5 = P('/~' + username + '/Documents')
p6 = P('')
p7 = P('~fakeuser/Documents')
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
self.assertEqual(p1.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
env['HOME'] = '/tmp'
self.assertEqual(p1.expanduser(), P('/tmp/Documents'))
self.assertEqual(p2.expanduser(), P(userhome) / 'Documents')
self.assertEqual(p3.expanduser(), P(otherhome) / 'Documents')
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
self.assertRaises(RuntimeError, p7.expanduser)
@unittest.skipIf(sys.platform != "darwin",
"Bad file descriptor in /dev/fd affects only macOS")
def test_handling_bad_descriptor(self):
try:
file_descriptors = list(pathlib.Path('/dev/fd').rglob("*"))[3:]
if not file_descriptors:
self.skipTest("no file descriptors - issue was not reproduced")
# Checking all file descriptors because there is no guarantee
# which one will fail.
for f in file_descriptors:
f.exists()
f.is_dir()
f.is_file()
f.is_symlink()
f.is_block_device()
f.is_char_device()
f.is_fifo()
f.is_socket()
except OSError as e:
if e.errno == errno.EBADF:
self.fail("Bad file descriptor not handled.")
raise
@only_nt
class WindowsPathTest(_BasePathTest, unittest.TestCase):
cls = pathlib.WindowsPath
def test_glob(self):
P = self.cls
p = P(BASE)
self.assertEqual(set(p.glob("FILEa")), { P(BASE, "fileA") })
self.assertEqual(set(p.glob("F*a")), { P(BASE, "fileA") })
self.assertEqual(set(map(str, p.glob("FILEa"))), {f"{p}\\FILEa"})
self.assertEqual(set(map(str, p.glob("F*a"))), {f"{p}\\fileA"})
def test_rglob(self):
P = self.cls
p = P(BASE, "dirC")
self.assertEqual(set(p.rglob("FILEd")), { P(BASE, "dirC/dirD/fileD") })
self.assertEqual(set(map(str, p.rglob("FILEd"))), {f"{p}\\dirD\\FILEd"})
def test_expanduser(self):
P = self.cls
with support.EnvironmentVarGuard() as env:
env.pop('HOME', None)
env.pop('USERPROFILE', None)
env.pop('HOMEPATH', None)
env.pop('HOMEDRIVE', None)
env['USERNAME'] = 'alice'
# test that the path returns unchanged
p1 = P('~/My Documents')
p2 = P('~alice/My Documents')
p3 = P('~bob/My Documents')
p4 = P('/~/My Documents')
p5 = P('d:~/My Documents')
p6 = P('')
self.assertRaises(RuntimeError, p1.expanduser)
self.assertRaises(RuntimeError, p2.expanduser)
self.assertRaises(RuntimeError, p3.expanduser)
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
def check():
env.pop('USERNAME', None)
self.assertEqual(p1.expanduser(),
P('C:/Users/alice/My Documents'))
self.assertRaises(KeyError, p2.expanduser)
env['USERNAME'] = 'alice'
self.assertEqual(p2.expanduser(),
P('C:/Users/alice/My Documents'))
self.assertEqual(p3.expanduser(),
P('C:/Users/bob/My Documents'))
self.assertEqual(p4.expanduser(), p4)
self.assertEqual(p5.expanduser(), p5)
self.assertEqual(p6.expanduser(), p6)
env['HOMEPATH'] = 'C:\\Users\\alice'
check()
env['HOMEDRIVE'] = 'C:\\'
env['HOMEPATH'] = 'Users\\alice'
check()
env.pop('HOMEDRIVE', None)
env.pop('HOMEPATH', None)
env['USERPROFILE'] = 'C:\\Users\\alice'
check()
# bpo-38883: ignore `HOME` when set on windows
env['HOME'] = 'C:\\Users\\eve'
check()
class CompatiblePathTest(unittest.TestCase):
"""
Test that a type can be made compatible with PurePath
derivatives by implementing division operator overloads.
"""
class CompatPath:
"""
Minimum viable class to test PurePath compatibility.
Simply uses the division operator to join a given
string and the string value of another object with
a forward slash.
"""
def __init__(self, string):
self.string = string
def __truediv__(self, other):
return type(self)(f"{self.string}/{other}")
def __rtruediv__(self, other):
return type(self)(f"{other}/{self.string}")
def test_truediv(self):
result = pathlib.PurePath("test") / self.CompatPath("right")
self.assertIsInstance(result, self.CompatPath)
self.assertEqual(result.string, "test/right")
with self.assertRaises(TypeError):
# Verify improper operations still raise a TypeError
pathlib.PurePath("test") / 10
def test_rtruediv(self):
result = self.CompatPath("left") / pathlib.PurePath("test")
self.assertIsInstance(result, self.CompatPath)
self.assertEqual(result.string, "left/test")
with self.assertRaises(TypeError):
# Verify improper operations still raise a TypeError
10 / pathlib.PurePath("test")
if __name__ == "__main__":
unittest.main()
# file: interactive.py | repo: ghchen18/acl22_sixtp | license: MIT
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
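
Example invocation (a sketch; exact flags depend on your task and checkpoint):

    python interactive.py data-bin/ --path checkpoint.pt --beam 5 --buffer-size 16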
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
from fairseq.dataclass.configs import FairseqConfig
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
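    """Yield lists of at most `buffer_size` stripped lines read from
    `input` (a file path, or "-" for stdin, as handled by fileinput)."""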
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, cfg, task, max_positions, encode_fn):
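    """Yield Batch tuples built from raw `lines`, sized according to
    cfg.dataset.max_tokens / cfg.dataset.batch_size and `max_positions`."""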
def encode_fn_target(x):
return encode_fn(x)
if cfg.generation.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
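        # e.g. an input line "Hello world\tBonjour\tmonde" yields the source
        # "Hello world" plus two decoding constraints.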
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [
task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
)
for constraint in constraint_list
]
if cfg.generation.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(
tokens, lengths, constraints=constraints_tensor
),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch["id"]
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
start_time = time.time()
total_translate_time = 0
utils.import_user_module(cfg.common)
if cfg.interactive.buffer_size < 1:
cfg.interactive.buffer_size = 1
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.batch_size = 1
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
not cfg.dataset.batch_size
or cfg.dataset.batch_size <= cfg.interactive.buffer_size
), "--batch-size cannot be larger than --buffer-size"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Setup task, e.g., translation
task = tasks.setup_task(cfg)
# Load ensemble
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
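    # decode_fn is the inverse pipeline: undo BPE first, then detokenize.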
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
if cfg.generation.constraints:
logger.warning(
"NOTE: Constrained decoding currently assumes a shared subword vocabulary."
)
if cfg.interactive.buffer_size > 1:
logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Type the input sentence and press return:")
start_id = 0
for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
results = []
for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(
generator, models, sample, constraints=constraints
)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if cfg.generation.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append(
(
start_id + id,
src_tokens_i,
hypos,
{
"constraints": constraints,
"time": translate_time / len(translations),
},
)
)
# sort output to match input order
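        # Output line prefixes: S = source, W = wall-clock time, C = constraint,
        # H = hypothesis (still tokenized), D = detokenized hypothesis,
        # P = per-token positional scores, A = alignment.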
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
src_str = ''
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
print("S-{}\t{}".format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print(
"C-{}\t{}".format(
id_, tgt_dict.string(constraint, cfg.common_eval.post_process)
)
)
# Process top predictions
for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print("H-{}\t{}\t{}".format(id_, score, hypo_str))
# detokenized hypothesis
print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
print(
"P-{}\t{}".format(
id_,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"].div_(math.log(2)).tolist(),
)
),
)
)
if cfg.generation.print_alignment:
alignment_str = " ".join(
["{}-{}".format(src, tgt) for src, tgt in alignment]
)
print("A-{}\t{}".format(id_, alignment_str))
# update running id_ counter
start_id += len(inputs)
logger.info(
"Total time: {:.3f} seconds; translation time: {:.3f}".format(
time.time() - start_time, total_translate_time
)
)
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
# file: examples/bnp_competition/scripts/bnp_model.py | repo: machinebrains/neat-society | license: BSD-3-Clause
#!/usr/bin/python
###################################################################################################################
### This code is developed by HighEnergyDataScientests Team.
### Do not copy or modify without written approval from one of the team members.
###################################################################################################################
import pandas as pd
import numpy as np
import xgboost as xgb
import operator
#from __future__ import print_function
import math
from neatsociety import nn, parallel, population, visualize
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import Imputer
import sklearn.metrics
import matplotlib
matplotlib.use("Agg") # Needed to save figures; must be called before importing pyplot
import matplotlib.pyplot as plt
import time
import os
### Controlling Parameters
output_col_name = "target"
test_col_name = "PredictedProb"
enable_feature_analysis = 1
id_col_name = "ID"
num_iterations = 5
### Creating output folders
if not os.path.isdir("../predictions"):
os.mkdir("../predictions")
if not os.path.isdir("../intermediate_data"):
os.mkdir("../intermediate_data")
if not os.path.isdir("../saved_states"):
os.mkdir("../saved_states")
def create_feature_map(features, featureMapFile):
    with open(featureMapFile, 'w') as outfile:
        for i, feat in enumerate(features):
            outfile.write('{0}\t{1}\tq\n'.format(i, feat))
def fitness(genome):
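    # NEAT maximizes fitness, so turn log-loss (lower is better) into a
    # score where higher is better. Relies on the module-level X_train,
    # y_train and features set up under __main__ below.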
net = nn.create_feed_forward_phenotype(genome)
output = net.array_activate(X_train[features].values)
logloss_error = sklearn.metrics.log_loss(y_train, output[:,0])
return 1.0 - logloss_error
def train_model(features,num_generations):
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
t0 = time.time()
print("## Train a NEAT model")
timestr = time.strftime("%Y%m%d-%H%M%S")
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'bnp_config')
# Use a pool of four workers to evaluate fitness in parallel.
pe = parallel.ParallelEvaluator(fitness,3,progress_bar=True,verbose=1)
pop = population.Population(config_path)
pop.run(pe.evaluate, num_generations)
print("total evolution time {0:.3f} sec".format((time.time() - t0)))
print("time per generation {0:.3f} sec".format(((time.time() - t0) / pop.generation)))
print('Number of evaluations: {0:d}'.format(pop.total_evaluations))
# Verify network output against training data.
print("## Test against verification data.")
winner = pop.statistics.best_genome()
net = nn.create_feed_forward_phenotype(winner)
p_train = net.array_activate(X_train[features].values)
p_valid = net.array_activate(X_valid[features].values)
score_train = sklearn.metrics.log_loss(y_train, p_train[:,0])
score_valid = sklearn.metrics.log_loss(y_valid, p_valid[:,0])
print("Score based on training data set = ", score_train)
print("Score based on validating data set = ", score_valid)
# Visualize the winner network and plot statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True)
print("## Predicting test data")
preds = net.array_activate(test[features].values)
test[test_col_name] = preds
test[[id_col_name,test_col_name]].to_csv("../predictions/pred_" + timestr + ".csv", index=False)
if __name__ == '__main__':
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Start Time Stamp ==== " + timestamp)
print("## Loading Data")
models_predictions_file = "../predictions/models_predictions.csv"
train = pd.read_csv('../inputs/train.csv')
test = pd.read_csv('../inputs/test.csv')
if os.path.isfile(models_predictions_file):
models_predictions = pd.read_csv(models_predictions_file)
else:
models_predictions = pd.DataFrame()
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Data Processing")
train = train.drop(id_col_name, axis=1)
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Data Encoding")
for f in train.columns:
if train[f].dtype=='object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
features = [s for s in train.columns.ravel().tolist() if s != output_col_name]
print("Features: ", features)
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(train[features])
train[features] = imp.transform(train[features])
test[features] = imp.transform(test[features])
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Training")
numPos = len(train[train[output_col_name] == 1])
numNeg = len(train[train[output_col_name] == 0])
scaleRatio = float(numNeg) / float(numPos)
print("Number of postive " + str(numPos) + " , Number of negative " + str(numNeg) + " , Ratio Negative to Postive : " , str(scaleRatio))
test_size = 0.05
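    # Manual stratified split: split positives and negatives separately so
    # both sets keep the class ratio, then shuffle each recombined set.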
X_pos = train[train[output_col_name] == 1]
X_neg = train[train[output_col_name] == 0]
X_train_pos, X_valid_pos = train_test_split(X_pos, test_size=test_size)
X_train_neg, X_valid_neg = train_test_split(X_neg, test_size=test_size)
X_train = pd.concat([X_train_pos,X_train_neg])
X_valid = pd.concat([X_valid_pos,X_valid_neg])
X_train = X_train.iloc[np.random.permutation(len(X_train))]
X_valid = X_valid.iloc[np.random.permutation(len(X_valid))]
y_train = X_train[output_col_name]
y_valid = X_valid[output_col_name]
num_generations = 1000
train_model(features,num_generations)
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## End Time Stamp ==== " + timestamp)
# file: ros/src/tl_detector/tl_detector.py | repo: GitHubChuanYu/T3Project4_SystemIntegration | license: MIT
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane, Waypoint
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
import numpy as np
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoint_tree = None
self.waypoints_2d = None
self.camera_image = None
self.lights = []
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.safe_load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# Check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx-1]
# Equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect-prev_vect, pos_vect-cl_vect)
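        # A positive dot product means the closest waypoint lies behind the
        # vehicle, so step to the next one (modulo wraps around the track).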
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#For testing, just return the light state
return light.state
# if(not self.has_image):
# self.prev_light_loc = None
# return False
# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
# #Get classification
# return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
# file: src/docker_merger.py | repo: wct-devops/compact | license: MIT
import os
import sys
import json
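# Deduplicate unpacked docker image layers: identical layers (matched by
# diff_id sha) are moved once into unfold/shared/, emptied in each image,
# and a restore_layers.sh script is emitted to put them back later.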
UNFOLD_DIR = "unfold"
SHARED_DIR = os.path.join(UNFOLD_DIR, "shared" )
os.path.exists(UNFOLD_DIR) or os.makedirs(UNFOLD_DIR)
os.path.exists(SHARED_DIR) or os.makedirs(SHARED_DIR)
SHAID_EXISTS = {}
RESTORE_LAYERS = {}
merge_res_list=[]
if len(sys.argv) > 1 :
    for line in open( sys.argv[1] ,'rt' ).readlines():
line = line.replace('\n','')
line = line.replace(' ','')
if len(line) > 1 :
SHAID_EXISTS[line] = 1
for image_name in os.listdir(UNFOLD_DIR):
each_image_home = os.path.join( UNFOLD_DIR, image_name )
if image_name == 'shared':
continue
layer_list = []
shaid_list = []
image_size=0
for file in os.listdir( each_image_home ):
if file[-5:] == '.json' :
if file == 'manifest.json':
d = json.loads( open( os.path.join(each_image_home, file) ,'rt').read() )
layer_list = d[0]["Layers"]
else:
d = json.loads( open( os.path.join(each_image_home, file) ,'rt').read() )
shaid_list = d["rootfs"]["diff_ids"]
for i in range(0, len(shaid_list)) :
if ( os.path.islink( os.path.join( each_image_home, layer_list[i] ))) :
continue
if shaid_list[i] in SHAID_EXISTS :
cmd = "cd " + each_image_home + "; > %s"%layer_list[i]
print(cmd)
print(os.popen( cmd ).read())
else:
layer_id = layer_list[i].split('/')[0]
shaid = shaid_list[i]
shared_shaid_path = os.path.join( SHARED_DIR, shaid)
cmd = "cd %s; mv %s %s.tar"%( each_image_home, layer_list[i], os.path.join("..", "..", shared_shaid_path))
print(cmd)
ret=os.system(cmd)
if ret != 0:
print("Execute CMD [{}] Failed!".format(cmd))
exit(1)
# sum layer size
du_cmd = "du -sm {}.tar".format(shared_shaid_path)
try:
du_res = os.popen(du_cmd).read().split()[0]
layer_size=int(du_res)
except Exception as err:
layer_size=0
image_size=image_size+layer_size
restore_target = RESTORE_LAYERS.get( "%s.tar"%shared_shaid_path )
if restore_target is None :
restore_target = []
RESTORE_LAYERS["%s.tar"%shared_shaid_path] = restore_target
restore_target.append( os.path.join( each_image_home, layer_id, "layer.tar" ) )
res_str = "{}: {}MB".format(image_name, image_size)
merge_res_list.append(res_str)
restore_shell = open("restore_layers.sh","wt")
for layer_name in RESTORE_LAYERS.keys():
restore_target = RESTORE_LAYERS[layer_name]
for i in range(0, len(restore_target)):
if i == len(restore_target) - 1:
restore_shell.write("mv %s %s\n"%(layer_name, restore_target[i]))
else:
restore_shell.write("cp %s %s\n"%(layer_name, restore_target[i]))
restore_shell.close()
merge_res_list=[line+"\n" for line in merge_res_list]
with open('summary/summary_compact_merge_size.txt','w') as f:
f.writelines(merge_res_list)
for filename in os.listdir( SHARED_DIR ):
if filename[-4:] == '.tar':
shaid = filename[:-4]
shared_shaid_path = os.path.join( SHARED_DIR, shaid)
os.path.exists(shared_shaid_path) or os.makedirs(shared_shaid_path)
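        # tar-split disasm extracts the layer's files and saves a metadata
        # stream, so the byte-identical layer tar can be reassembled later.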
tar_meta_file = os.path.join( SHARED_DIR , shaid + "_meta.json.gz")
cmd = "./tar-split disasm --output %s %s.tar | tar -C %s -x && rm %s.tar"%( tar_meta_file, shared_shaid_path, shared_shaid_path , shared_shaid_path)
print(cmd)
print(os.popen(cmd).read())
cmd="mksquashfs unfold/ images.squashfs"
print(cmd)
print(os.popen( cmd ).read())
# file: gs_api_client/swagger/models/snapshot_schedule_get_response.py | repo: gridscale/gridscale_api_client_python | license: MIT
# coding: utf-8
"""
API Specification
# Introduction Welcome to gridscales API documentation. A REST API is a programming interface that allows you to access and send data directly to our systems using HTTPS requests, without the need to use a web GUI. All the functionality you are already familiar with in your control panel is accessible through the API, including expert methods that are only available through the API. Allowing you to script any actions you require, regardless of their complexity. First we will start with a general overview about how the API works, followed by an extensive list of each endpoint, describing them in great detail. ## Requests For security, gridscale requires all API requests are made through the HTTPS protocol so that traffic is encrypted. The following table displays the different type of requests that the interface responds to, depending on the action you require. | Method | Description | | --- | --- | | GET | A simple search of information. The response is a JSON object. Requests using GET are always read-only. | | POST | Adds new objects and object relations. The POST request must contain all the required parameters in the form of a JSON object. | | PATCH | Changes an object or an object relation. The parameters in PATCH requests are usually optional, so only the changed parameters must be specified in a JSON object. | | DELETE | Deletes an object or object relation. The object is deleted if it exists. | | OPTIONS | Get an extensive list of the servers support methods and characteristics. We will not give example OPTION requests on each endpoint, as they are extensive and self-descriptive. | <aside class=\"notice\"> The methods PATCH and DELETE are idempotent - that is, a request with identical parameters can be sent several times, and it doesn't change the result. </aside> ## Status Codes | HTTP Status | `Message` | Description | | --- | --- | --- | | 200 | `OK` | The request has been successfully processed and the result of the request is transmitted in the response. | | 202 | `Accepted` | The request has been accepted, but will run at a later date. Meaning we can not guarantee the success of the request. You should poll the request to be notified once the resource has been provisioned - see the requests endpoint on how to poll. | | 204 | `No Content` | The request was successful, but the answer deliberately contains no data. | | 400 | `Bad Request` | The request message was built incorrectly. | | 401 | `Unauthorised` | The request can not be performed without a valid authentication. X-Auth UserId or X-Auth token HTTP header is not set or the userID / token is invalid. | | 402 | `Payment Required` | Action can not be executed - not provided any or invalid payment methods. | | 403 | `Forbidden` | The request was not carried out due to lack of authorization of the user or because an impossible action was requested. | | 404 | `Not Found` | The requested resource was not found. Will also be used if you do a resource exists, but the user does not have permission for it. | | 405 | `Method Not Allowed` | The request may be made only with other HTTP methods (eg GET rather than POST). | | 409 | `Conflict` | The request was made under false assumptions. For example, a user can not be created twice with the same email. | | 415 | `Unsupported Media Type` | The contents of the request have been submitted with an invalid media type. All POST or PATCH requests must have \"Content-Type : application / json\" as a header, and send a JSON object as a payload. 
| | 416 | `Requested Range Not Satisfiable` | The request could not be fulfilled. It is possible that a resource limit was reached or an IPv4 address pool is exhausted. | | 424 | `Failed Dependency` | The request could not be performed because the object is in the wrong status. | | 429 | `Too Many Requests` | The request has been rejected because rate limits have been exceeded. | <aside class=\"success\"> Status 200-204 indicates that the request has been accepted and is processed. </aside> <aside class=\"notice\"> Status 400-429 indicates that there was a problem with the request that originated on the client. You will find more information about the problem in the body of 4xx response. </aside> <aside class=\"warning\"> A status 500 means that there was a server-side problem and your request can not be processed now. </aside> ## Request Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Auth-userId | The user UUID. This can be found in the panel under \"API\" and will never change ( even after the change of user e-mail). | | X-Auth-Token | Is generated from the API hash and must be sent with all API requests. Both the token and its permissions can be configured in the panel.| ## Response Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Time-Provisioning | The time taken to process the request (in ms). | | X-Api-Identity | The currently active Provisioning API version. Useful when reporting bugs to us. | | X-Request-Id | The unique identifier of the request, be sure to include it when referring to a request. | | RateLimit-Limit | The number of requests that can be made per minute. | | RateLimit-Remaining | The number of requests that still remain before you hit your request limit. | | RateLimit-Reset | A [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in milliseconds of when the rate limit will reset, or the time at which a request no longer will return 429 - Too Many Requests. | ## Timestamp Format All timestamps follow <a href=\"https://de.wikipedia.org/wiki/ISO_8601\" target=\"_blank_\">ISO 8601</a> and issued in <a href=\"https://www.timeanddate.de/zeitzonen/utc-gmt\" target=\"_blank_\">UTC</a> ## CORS ### Cross Origin Resource Sharing To allow API access from other domains that supports the API CORS (Cross Origin Resource Sharing). See: enable-cors.org/ . This allows direct use the API in the browser running a JavaScript web control panel. All this is done in the background by the browser. The following HTTP headers are set by the API: Header | Parameter | Description --- | --- | --- Access-Control-Allow-Methods | GET, POST, PUT, PATCH, DELETE, OPTIONS | Contains all available methods that may be used for queries. Access-Control-Allow-Credentials | true | Is set to \"true\". Allows the browser to send the authentication data via X-Auth HTTP header. Access-Control-Allow-Headers | Origin, X-Requested-With, Content-Type, Accept, X-Auth-UserId, X-Auth-Token, X-Exec-Time, X-API-Version, X-Api-Client | The HTTP headers available for requests. Access-Control-Allow-Origin | * | The domain sent by the browser as a source of demand. Access-Control-Expose-Headers | X-Exec-Time, X-Api-Version | The HTTP headers that can be used by a browser application. ## Rate Limits The number of requests that can be made through our API is currently limited to 210 requests per 60 seconds. The current state of rate limiting is returned within the response headers of each request. 
The relevant response headers are - RateLimit-Limit - RateLimit-Remaining - RateLimit-Reset See the Response Headers section for details. As long as the `RateLimit-Remaining` count is above zero, you will be able to make further requests. As soon as the `RateLimit-Remaining` header value is zero, subsequent requests will return the 429 status code. This will stay until the timestamp given in `RateLimit-Reset` has been reached. ### Example rate limiting response ```shell HTTP/1.0 429 TOO MANY REQUESTS Content-Length: 66 Content-Type: application/json; charset=utf-8 Date: Mon, 11 Nov 2019 11:11:33 GMT RateLimit-Limit: 210 RateLimit-Remaining: 0 RateLimit-Reset: 1573468299256 { \"id\": \"too_many_requests\", \"message\": \"API Rate limit exceeded.\" } ``` It is important to understand how rate limits are reset in order to use the API efficiently. Rate limits are reset for all counted requests at once. This means that that once the timestamp `RateLimit-Remaining` has arrived all counted request are reset and you can again start sending requests to the API. This allows for short burst of traffic. The downside is once you have hit the request limit no more requests are allowed until the rate limit duration is reset. ## Object Relations Relationships describe resource objects (storages, networks, IPs, etc.) that are connected to a server. These relationships are treated like objects themselves and can have properties specific to this relation. One example would be, that the MAC address of a private network connected to a server (Server-to-Network relation) can be found as property of the relation itself - the relation is the _network interface_ in the server. Another example is storage, where the SCSI LUN is also part of the Server-to-Storage relation object. This information is especially interesting if some kind of network boot is used on the servers, where the properties of the server need to be known beforehand. ## Deleted Objects Objects that are deleted are no longer visible on their *regular* endpoints. For historical reasons these objects are still available read-only on a special endpoint named /deleted. If objects have been deleted but have not yet been billed in the current period, the yet-to-be-billed price is still shown. <!-- #strip_js --> ## Node.js / Javascript Library We have a JavaScript library for you to use our API with ease. <a href=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js\"><img src=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js.svg\" alt=\"npm version\" height=\"18\"></a> <aside class=\"success\"> We want to make it even easier for you to manage your Infrastructure via our API - so feel free to contact us with any ideas, or languages you would like to see included. </aside> Requests with our Node.js lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://www.npmjs.com/package/@gridscale/gsclient-js\" target=\"_blank\">click here</a> . <!-- #strip_js_end --> <!-- #strip_go --> ## Golang Library We also have a Golang library for Gophers. Requests with our Golang lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://github.com/gridscale/gsclient-go\" target=\"_blank\">click here</a> . 
<!-- #strip_go_end --> <!-- #strip_python --> ## Python Library We have a Python library, that optionally also simplifies handling of asynchronous requests by mimicking synchronous blocking behaviour. To get started <a href=\"https://pypi.org/project/gs-api-client/\" target=\"_blank\">click here</a> . <!-- #strip_python_end --> # Authentication In order to use the API, the User-UUID and an API_Token are required. Both are available via the web GUI which can be found here on <a href=\"https://my.gridscale.io/APIs/\" target=\"_blank\">Your Account</a> <aside class=\"success\"> If you are logged in, your UUID and Token will be pulled dynamically from your account, so you can copy request examples straight into your code. </aside> The User-UUID remains the same, even if the users email address is changed. The API_Token is a randomly generated hash that allows read/write access. ## API_Token <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-Token </td></tr></tbody></table> ## User_UUID <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-UserId </td></tr></tbody></table> ## Examples <!-- #strip_js --> > Node.js ``` // to get started // read the docs @ https://www.npmjs.com/package/@gs_js_auth/api var gs_js_auth = require('@gs_js_auth/api').gs_js_auth; var client = new gs_js_auth.Client(\"##API_TOKEN##\",\"##USER_UUID##\"); ``` <!-- #strip_js_end --> <!-- #strip_go --> > Golang ``` // to get started // read the docs @ https://github.com/gridscale/gsclient-go config := gsclient.NewConfiguration( \"https://api.gridscale.io\", \"##USER_UUID##\", \"##API_TOKEN##\", false, //set debug mode ) client := gsclient.NewClient(config) ``` <!-- #strip_go_end --> > Shell Authentication Headers ``` -H \"X-Auth-UserId: ##USER_UUID##\" \\ -H \"X-Auth-Token: ##API_TOKEN##\" \\ ``` > Setting Authentication in your Environment variables ``` export API_TOKEN=\"##API_TOKEN##\" USER_UUID=\"##USER_UUID##\" ``` <aside class=\"notice\"> You must replace <code>USER_UUID</code> and <code>API_Token</code> with your personal UUID and API key respectively. </aside> # noqa: E501
OpenAPI spec version: 1.0.50
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from gs_api_client.swagger.models.snapshot_schedule import SnapshotSchedule # noqa: F401,E501
class SnapshotScheduleGetResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'snapshot_schedule': 'SnapshotSchedule'
}
attribute_map = {
'snapshot_schedule': 'snapshot_schedule'
}
def __init__(self, snapshot_schedule=None): # noqa: E501
"""SnapshotScheduleGetResponse - a model defined in Swagger""" # noqa: E501
self._snapshot_schedule = None
self.discriminator = None
if snapshot_schedule is not None:
self.snapshot_schedule = snapshot_schedule
@property
def snapshot_schedule(self):
"""Gets the snapshot_schedule of this SnapshotScheduleGetResponse. # noqa: E501
:return: The snapshot_schedule of this SnapshotScheduleGetResponse. # noqa: E501
:rtype: SnapshotSchedule
"""
return self._snapshot_schedule
@snapshot_schedule.setter
def snapshot_schedule(self, snapshot_schedule):
"""Sets the snapshot_schedule of this SnapshotScheduleGetResponse.
:param snapshot_schedule: The snapshot_schedule of this SnapshotScheduleGetResponse. # noqa: E501
:type: SnapshotSchedule
"""
self._snapshot_schedule = snapshot_schedule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SnapshotScheduleGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnapshotScheduleGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
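A quick round trip through the generated model above (a sketch; nothing here is gridscale-specific beyond the class itself):

```python
resp = SnapshotScheduleGetResponse(snapshot_schedule=None)
print(resp.to_dict())                         # {'snapshot_schedule': None}
print(resp == SnapshotScheduleGetResponse())  # True - __eq__ compares __dict__
```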
[file: flaskprediction/__init__.py | repo: killswitchh/flask-prediction-app @ a8bdff96 | license: MIT]
from flask import Flask
app = Flask(__name__)
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
from flaskprediction import routes
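For context, a minimal sketch of how a package-level app like this is typically launched; `run.py` is a hypothetical entry point, and `routes` is assumed to attach view functions to `app`:

```python
# run.py - hypothetical launcher next to the flaskprediction/ package
from flaskprediction import app

if __name__ == "__main__":
    app.run(debug=True)
```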
[file: emsapi/models/adi_ems_web_api_v2_dto_trajectory_configuration_py3.py | repo: ge-flight-analytics/emsapi-python @ 2e3a5352 | license: MIT]
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoTrajectoryConfiguration(Model):
"""Encapsulates a data point defining the userOptions config for a Google
Earth export.
:param trajectory_id: A unique identifier for this type of KML trajectory.
Typically this just the name of the file sans extension.
:type trajectory_id: str
:param description: A description of what kind of trajectory this KML type
generates.
:type description: str
"""
_attribute_map = {
'trajectory_id': {'key': 'trajectoryId', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, *, trajectory_id: str=None, description: str=None, **kwargs) -> None:
super(AdiEmsWebApiV2DtoTrajectoryConfiguration, self).__init__(**kwargs)
self.trajectory_id = trajectory_id
self.description = description
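A small construction sketch for the DTO above; `serialize()` comes from msrest's `Model` base class and applies `_attribute_map`, so keys come out camelCased (the values here are made up):

```python
cfg = AdiEmsWebApiV2DtoTrajectoryConfiguration(
    trajectory_id="standard-kml",  # hypothetical identifier
    description="Basic ground-track trajectory export",
)
print(cfg.serialize())  # {'trajectoryId': 'standard-kml', 'description': '...'}
```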
[file: AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/EditorScripts/LayerSpawner_InstancesPlantInAllSupportedShapes.py | repo: aaarsene/o3de @ 37e3b022 | licenses: Apache-2.0, MIT]
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import azlmbr
import azlmbr.legacy.general as general
import azlmbr.entity as EntityId
import azlmbr.math as math
sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests'))
import editor_python_test_tools.hydra_editor_utils as hydra
from editor_python_test_tools.editor_test_helper import EditorTestHelper
from largeworlds.large_worlds_utils import editor_dynveg_test_helper as dynveg
class TestLayerSpawner_AllShapesPlant(EditorTestHelper):
def __init__(self):
EditorTestHelper.__init__(self, log_prefix="TestLayerSpawner_AllShapesPlant", args=["level"])
def run_test(self):
"""
Summary:
        The level is loaded and a vegetation area is created. The Vegetation Reference Shape
        component of the vegetation area is then pinned to entities with different shape
        components to check that vegetation plants in areas of each shape.
Expected Behavior:
Vegetation properly plants in areas of any shape.
Test Steps:
1) Create level
2) Create basic vegetation area entity and set the properties
3) Box Shape Entity: create, set properties and pin to vegetation
4) Capsule Shape Entity: create, set properties and pin to vegetation
5) Tube Shape Entity: create, set properties and pin to vegetation
6) Sphere Shape Entity: create, set properties and pin to vegetation
7) Cylinder Shape Entity: create, set properties and pin to vegetation
8) Prism Shape Entity: create, set properties and pin to vegetation
9) Compound Shape Entity: create, set properties and pin to vegetation
Note:
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def pin_shape_and_check_count(entity_id, count):
hydra.get_set_test(vegetation, 2, "Configuration|Shape Entity Id", entity_id)
result = self.wait_for_condition(lambda: dynveg.validate_instance_count_in_entity_shape(vegetation.id,
count), 2.0)
self.test_success = self.test_success and result
# 1) Create level
self.test_success = self.create_level(
self.args["level"],
heightmap_resolution=1024,
heightmap_meters_per_pixel=1,
terrain_texture_resolution=4096,
use_terrain=False,
)
# 2) Create basic vegetation area entity and set the properties
entity_position = math.Vector3(125.0, 136.0, 32.0)
asset_path = os.path.join("Slices", "PurpleFlower.dynamicslice")
vegetation = dynveg.create_vegetation_area("Instance Spawner",
entity_position,
10.0, 10.0, 10.0,
asset_path)
vegetation.remove_component("Box Shape")
vegetation.add_component("Vegetation Reference Shape")
# Create surface for planting on
dynveg.create_surface_entity("Surface Entity", entity_position, 60.0, 60.0, 1.0)
# Adjust camera to be close to the vegetation entity
general.set_current_view_position(135.0, 102.0, 39.0)
general.set_current_view_rotation(-15.0, 0, 0)
# 3) Box Shape Entity: create, set properties and pin to vegetation
box = hydra.Entity("box")
box.create_entity(math.Vector3(124.0, 126.0, 32.0), ["Box Shape"])
new_box_dimension = math.Vector3(10.0, 10.0, 1.0)
hydra.get_set_test(box, 0, "Box Shape|Box Configuration|Dimensions", new_box_dimension)
# This and subsequent counts are the number of "PurpleFlower" that spawn in the shape with given dimensions
pin_shape_and_check_count(box.id, 156)
# 4) Capsule Shape Entity: create, set properties and pin to vegetation
capsule = hydra.Entity("capsule")
capsule.create_entity(math.Vector3(120.0, 142.0, 32.0), ["Capsule Shape"])
hydra.get_set_test(capsule, 0, "Capsule Shape|Capsule Configuration|Height", 10.0)
hydra.get_set_test(capsule, 0, "Capsule Shape|Capsule Configuration|Radius", 2.0)
pin_shape_and_check_count(capsule.id, 20)
# 5) Tube Shape Entity: create, set properties and pin to vegetation
tube = hydra.Entity("tube")
tube.create_entity(math.Vector3(124.0, 136.0, 32.0), ["Tube Shape", "Spline"])
pin_shape_and_check_count(tube.id, 27)
# 6) Sphere Shape Entity: create, set properties and pin to vegetation
sphere = hydra.Entity("sphere")
sphere.create_entity(math.Vector3(112.0, 143.0, 32.0), ["Sphere Shape"])
hydra.get_set_test(sphere, 0, "Sphere Shape|Sphere Configuration|Radius", 5.0)
pin_shape_and_check_count(sphere.id, 122)
# 7) Cylinder Shape Entity: create, set properties and pin to vegetation
cylinder = hydra.Entity("cylinder")
cylinder.create_entity(math.Vector3(136.0, 143.0, 32.0), ["Cylinder Shape"])
hydra.get_set_test(cylinder, 0, "Cylinder Shape|Cylinder Configuration|Radius", 5.0)
hydra.get_set_test(cylinder, 0, "Cylinder Shape|Cylinder Configuration|Height", 5.0)
pin_shape_and_check_count(cylinder.id, 124)
# 8) Prism Shape Entity: create, set properties and pin to vegetation
polygon_prism = hydra.Entity("polygonprism")
polygon_prism.create_entity(math.Vector3(127.0, 142.0, 32.0), ["Polygon Prism Shape"])
pin_shape_and_check_count(polygon_prism.id, 20)
# 9) Compound Shape Entity: create, set properties and pin to vegetation
compound = hydra.Entity("Compound")
compound.create_entity(math.Vector3(125.0, 136.0, 32.0), ["Compound Shape"])
pte = hydra.get_property_tree(compound.components[0])
shapes = [box.id, capsule.id, tube.id, sphere.id, cylinder.id, polygon_prism.id]
for index in range(6):
pte.add_container_item("Configuration|Child Shape Entities", index, EntityId.EntityId())
for index, element in enumerate(shapes):
hydra.get_set_test(compound, 0, f"Configuration|Child Shape Entities|[{index}]", element)
pin_shape_and_check_count(compound.id, 469)
test = TestLayerSpawner_AllShapesPlant()
test.run()
[file: arcade/examples/sprite_bullets.py | repo: janscas/arcade @ d83dda94 | license: MIT]
"""
Sprite Bullets
Simple program to show basic sprite usage.
Artwork from https://kenney.nl
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_bullets
"""
import random
import arcade
import os
SPRITE_SCALING_PLAYER = 0.5
SPRITE_SCALING_COIN = 0.2
SPRITE_SCALING_LASER = 0.8
COIN_COUNT = 50
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites and Bullets Example"
BULLET_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self):
""" Initializer """
# Call the parent class initializer
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.player_list = None
self.coin_list = None
self.bullet_list = None
# Set up the player info
self.player_sprite = None
self.score = 0
# Don't show the mouse cursor
self.set_mouse_visible(False)
# Load sounds. Sounds from kenney.nl
self.gun_sound = arcade.load_sound(":resources:sounds/hurt5.wav")
self.hit_sound = arcade.load_sound(":resources:sounds/hit5.wav")
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
# Set up the player
self.score = 0
# Image from kenney.nl
self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/"
"femalePerson_idle.png", SPRITE_SCALING_PLAYER)
self.player_sprite.center_x = 50
self.player_sprite.center_y = 70
self.player_list.append(self.player_sprite)
# Create the coins
for i in range(COIN_COUNT):
# Create the coin instance
# Coin image from kenney.nl
coin = arcade.Sprite(":resources:images/items/coinGold.png", SPRITE_SCALING_COIN)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(120, SCREEN_HEIGHT)
# Add the coin to the lists
self.coin_list.append(coin)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
self.clear()
# Draw all the sprites.
self.coin_list.draw()
self.bullet_list.draw()
self.player_list.draw()
# Render the text
arcade.draw_text(f"Score: {self.score}", 10, 20, arcade.color.WHITE, 14)
def on_mouse_motion(self, x, y, dx, dy):
"""
Called whenever the mouse moves.
"""
self.player_sprite.center_x = x
def on_mouse_press(self, x, y, button, modifiers):
"""
Called whenever the mouse button is clicked.
"""
# Gunshot sound
arcade.play_sound(self.gun_sound)
# Create a bullet
bullet = arcade.Sprite(":resources:images/space_shooter/laserBlue01.png", SPRITE_SCALING_LASER)
# The image points to the right, and we want it to point up. So
# rotate it.
bullet.angle = 90
# Give the bullet a speed
bullet.change_y = BULLET_SPEED
# Position the bullet
bullet.center_x = self.player_sprite.center_x
bullet.bottom = self.player_sprite.top
# Add the bullet to the appropriate lists
self.bullet_list.append(bullet)
def on_update(self, delta_time):
""" Movement and game logic """
# Call update on bullet sprites
self.bullet_list.update()
# Loop through each bullet
for bullet in self.bullet_list:
# Check this bullet to see if it hit a coin
hit_list = arcade.check_for_collision_with_list(bullet, self.coin_list)
# If it did, get rid of the bullet
if len(hit_list) > 0:
bullet.remove_from_sprite_lists()
# For every coin we hit, add to the score and remove the coin
for coin in hit_list:
coin.remove_from_sprite_lists()
self.score += 1
# Hit Sound
arcade.play_sound(self.hit_sound)
# If the bullet flies off-screen, remove it.
if bullet.bottom > SCREEN_HEIGHT:
bullet.remove_from_sprite_lists()
def main():
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
[file: PyRoute/Galaxy.py | repo: elsiehupp/traveller_pyroute @ 32a43665 | license: MIT]
"""
Created on Mar 2, 2014
@author: tjoneslo
"""
import logging
import re
import codecs
import os
import ast
import itertools
import math
import networkx as nx
from Star import Star
from TradeCalculation import TradeCalculation, NoneCalculation, CommCalculation, XRouteCalculation, \
OwnedWorldCalculation
from StatCalculation import ObjectStatistics
from AllyGen import AllyGen
class AreaItem(object):
def __init__(self, name):
self.name = name
self.worlds = []
self.stats = ObjectStatistics()
self.alg = {}
self.alg_sorted = []
self._wiki_name = '[[{}]]'.format(name)
def wiki_title(self):
return self.wiki_name()
def wiki_name(self):
return self._wiki_name
def __str__(self):
return self.name
def world_count(self):
return len(self.worlds)
class Allegiance(AreaItem):
def __init__(self, code, name, base=False, population='Huma'):
super(Allegiance, self).__init__(Allegiance.allegiance_name(name, code, base))
self.code = code
self.base = base
self.population = population
self._wiki_name = Allegiance.set_wiki_name(name, code, base)
# For the JSONPickel work
def __getstate__(self):
state = self.__dict__.copy()
del state['alg_sorted']
return state
@staticmethod
def allegiance_name(name, code, base):
if base:
return name
names = name.split(',') if ',' in name else [name, '']
if code.startswith('Na'):
return '{} {}'.format(names[0], names[1].strip())
elif code.startswith('Cs'):
return '{}s of the {}'.format(names[0].strip(), names[1].strip())
elif ',' in name:
return '{}, {}'.format(names[0].strip(), names[1].strip())
return '{}'.format(name.strip())
@staticmethod
def set_wiki_name(name, code, base):
names = name.split(',') if ',' in name else [name, '']
if code.startswith('Na'):
return '[[{}]] {}'.format(names[0].strip(), names[1].strip())
elif code.startswith('Cs'):
return '[[{}]]s of the [[{}]]'.format(names[0].strip(), names[1].strip())
elif ',' in name:
if base:
return '[[{}]]'.format(names[0].strip())
else:
return '[[{}]], [[{}]]'.format(names[0].strip(), names[1].strip())
return '[[{}]]'.format(name.strip())
def __str__(self):
        return '{} ({})'.format(self.name, self.code)
def is_unclaimed(self):
return AllyGen.is_unclaimed(self)
def is_wilds(self):
return AllyGen.is_wilds(self)
def is_client_state(self):
return AllyGen.is_client_state(self)
def are_allies(self, other):
return AllyGen.are_allies(self.code, other.code)
class Subsector(AreaItem):
def __init__(self, name, position, sector):
super(Subsector, self).__init__(name)
self.positions = ["ABCD", "EFGH", "IJKL", "MNOP"]
self.sector = sector
self.position = position
self.spinward = None
self.trailing = None
self.coreward = None
self.rimward = None
self.dx = sector.dx
self.dy = sector.dy
self._wiki_name = Subsector.set_wiki_name(name, sector.name, position)
# For the JSONPickel work
def __getstate__(self):
state = self.__dict__.copy()
del state['sector']
del state['spinward']
del state['trailing']
del state['coreward']
del state['rimward']
del state['alg_sorted']
del state['positions']
return state
@staticmethod
def set_wiki_name(name, sector_name, position):
if len(name) == 0:
return "{0} location {1}".format(sector_name, position)
else:
if "(" in name:
return '[[{0} Subsector|{1}]]'.format(name, name[:-7])
else:
return '[[{0} Subsector|{0}]]'.format(name)
def wiki_title(self):
return '{0} - {1}'.format(self.wiki_name(), self.sector.wiki_name())
def subsector_name(self):
if len(self.name) == 0:
return "Location {}".format(self.position)
else:
return self.name[:-9] if self.name.endswith('Subsector') else self.name
def set_bounding_subsectors(self):
posrow = 0
for row in self.positions:
if self.position in row:
pos = self.positions[posrow].index(self.position)
break
posrow += 1
if posrow == 0:
self.coreward = self.sector.coreward.subsectors[self.positions[3][pos]] if self.sector.coreward else None
else:
self.coreward = self.sector.subsectors[self.positions[posrow - 1][pos]]
if pos == 0:
self.spinward = self.sector.spinward.subsectors[self.positions[posrow][3]] if self.sector.spinward else None
else:
self.spinward = self.sector.subsectors[self.positions[posrow][pos - 1]]
if posrow == 3:
self.rimward = self.sector.rimward.subsectors[self.positions[0][pos]] if self.sector.rimward else None
else:
self.rimward = self.sector.subsectors[self.positions[posrow + 1][pos]]
if pos == 3:
self.trailing = self.sector.trailing.subsectors[self.positions[posrow][0]] if self.sector.trailing else None
else:
self.trailing = self.sector.subsectors[self.positions[posrow][pos + 1]]
class Sector(AreaItem):
def __init__(self, name, position):
        # The name as passed from the Galaxy read includes the comment marker at the start of the line
# So strip the comment marker, then strip spaces.
super(Sector, self).__init__(name[1:].strip())
self._wiki_name = '[[{0} Sector|{0}]]'.format(self.sector_name())
# Same here, the position has a leading comment marker
self.x = int(position[1:].split(',')[0])
self.y = int(position[1:].split(',')[1])
self.dx = self.x * 32
self.dy = self.y * 40
self.subsectors = {}
self.spinward = None
self.trailing = None
self.coreward = None
self.rimward = None
# For the JSONPickel work
def __getstate__(self):
state = self.__dict__.copy()
del state['spinward']
del state['trailing']
del state['coreward']
del state['rimward']
del state['alg_sorted']
return state
def __str__(self):
return '{} ({},{})'.format(self.name, str(self.x), str(self.y))
def sector_name(self):
return self.name[:-7] if self.name.endswith('Sector') else self.name
def find_world_by_pos(self, pos):
for world in self.worlds:
if world.position == pos:
return world
return None
class Galaxy(AreaItem):
"""
classdocs
"""
def __init__(self, min_btn, max_jump=4, route_btn=8):
"""
Constructor
"""
super(Galaxy, self).__init__('Charted Space')
self.logger = logging.getLogger('PyRoute.Galaxy')
self.stars = nx.Graph()
self.ranges = nx.Graph()
self.sectors = {}
self.borders = AllyGen(self)
self.output_path = 'maps'
self.max_jump_range = max_jump
self.min_btn = min_btn
self.route_btn = route_btn
# For the JSONPickel work
def __getstate__(self):
state = self.__dict__.copy()
del state['stars']
del state['ranges']
del state['borders']
del state['logger']
del state['trade']
del state['sectors']
del state['alg_sorted']
return state
def read_sectors(self, sectors, pop_code, ru_calc):
for sector in sectors:
try:
lines = [line for line in codecs.open(sector, 'r', 'utf-8')]
except (OSError, IOError):
self.logger.error("sector file %s not found" % sector)
continue
self.logger.debug('reading %s ' % sector)
sec = Sector(lines[3], lines[4])
sec.filename = os.path.basename(sector)
for lineno, line in enumerate(lines):
if line.startswith('Hex'):
break
if line.startswith('# Subsector'):
data = line[11:].split(':', 1)
pos = data[0].strip()
name = data[1].strip()
sec.subsectors[pos] = Subsector(name, pos, sec)
if line.startswith('# Alleg:'):
alg_code = line[8:].split(':', 1)[0].strip()
alg_name = line[8:].split(':', 1)[1].strip().strip('"')
                # A workaround for the base Na codes, which may be empire dependent.
alg_race = AllyGen.population_align(alg_code, alg_name)
base = AllyGen.same_align(alg_code)
if base not in self.alg:
self.alg[base] = Allegiance(base, AllyGen.same_align_name(base, alg_name), base=True, population=alg_race)
if alg_code not in self.alg:
self.alg[alg_code] = Allegiance(alg_code, alg_name, base=False, population=alg_race)
for line in lines[lineno + 2:]:
if line.startswith('#') or len(line) < 20:
continue
star = Star.parse_line_into_star(line, sec, pop_code, ru_calc)
if star:
sec.worlds.append(star)
sec.subsectors[star.subsector()].worlds.append(star)
star.alg_base_code = AllyGen.same_align(star.alg_code)
self.set_area_alg(star, self, self.alg)
self.set_area_alg(star, sec, self.alg)
self.set_area_alg(star, sec.subsectors[star.subsector()], self.alg)
star.tradeCode.sophont_list.append("{}A".format(self.alg[star.alg_code].population))
self.sectors[sec.name] = sec
self.logger.info("Sector {} loaded {} worlds".format(sec, len(sec.worlds)))
self.set_bounding_sectors()
self.set_bounding_subsectors()
self.set_positions()
self.logger.debug("Allegiances: {}".format(self.alg))
def set_area_alg(self, star, area, algs):
full_alg = algs.get(star.alg_code, Allegiance(star.alg_code, 'Unknown Allegiance', base=False))
base_alg = algs.get(star.alg_base_code, Allegiance(star.alg_base_code, 'Unknown Allegiance', base=True))
area.alg.setdefault(star.alg_base_code, Allegiance(base_alg.code, base_alg.name, base=True)).worlds.append(star)
if star.alg_code != star.alg_base_code:
area.alg.setdefault(star.alg_code, Allegiance(full_alg.code, full_alg.name, base=False)).worlds.append(star)
def set_positions(self):
for sector in self.sectors.values():
for star in sector.worlds:
self.stars.add_node(star)
self.ranges.add_node(star)
self.logger.info("Total number of worlds: %s" % self.stars.number_of_nodes())
def set_bounding_sectors(self):
for sector, neighbor in itertools.combinations(self.sectors.values(), 2):
if sector.x - 1 == neighbor.x and sector.y == neighbor.y:
sector.spinward = neighbor
neighbor.trailing = sector
elif sector.x + 1 == neighbor.x and sector.y == neighbor.y:
sector.trailing = neighbor
neighbor.spinward = sector
elif sector.x == neighbor.x and sector.y - 1 == neighbor.y:
sector.rimward = neighbor
neighbor.coreward = sector
elif sector.x == neighbor.x and sector.y + 1 == neighbor.y:
sector.coreward = neighbor
neighbor.rimward = sector
elif sector.x == neighbor.x and sector.y == neighbor.y:
self.logger.error("Duplicate sector %s and %s" % (sector.name, neighbor.name))
def set_bounding_subsectors(self):
for sector in self.sectors.values():
for subsector in sector.subsectors.values():
subsector.set_bounding_subsectors()
def generate_routes(self, routes, reuse=10):
if routes == 'trade':
self.trade = TradeCalculation(self, self.min_btn, self.route_btn, reuse)
elif routes == 'comm':
self.trade = CommCalculation(self, reuse)
elif routes == 'xroute':
self.trade = XRouteCalculation(self)
elif routes == 'owned':
self.trade = OwnedWorldCalculation(self)
elif routes == 'none':
self.trade = NoneCalculation(self)
self.trade.generate_routes()
def set_borders(self, border_gen, match):
self.logger.info('setting borders...')
if border_gen == 'range':
self.borders.create_borders(match)
elif border_gen == 'allygen':
self.borders.create_ally_map(match)
elif border_gen == 'erode':
self.borders.create_erode_border(match)
else:
pass
def write_routes(self, routes=None):
path = os.path.join(self.output_path, 'ranges.txt')
with open(path, "wb") as f:
nx.write_edgelist(self.ranges, f, data=True)
path = os.path.join(self.output_path, 'stars.txt')
with open(path, "wb") as f:
nx.write_edgelist(self.stars, f, data=True)
path = os.path.join(self.output_path, 'borders.txt')
with codecs.open(path, "wb", "utf-8") as f:
for key, value in self.borders.borders.items():
f.write("{}-{}: border: {}\n".format(key[0], key[1], value))
if routes == 'xroute':
path = os.path.join(self.output_path, 'stations.txt')
with codecs.open(path, "wb", 'utf-8') as f:
stars = [star for star in self.stars if star.tradeCount > 0]
for star in stars:
f.write("{} - {}\n".format(star, star.tradeCount))
def process_eti(self):
self.logger.info("Processing ETI for worlds")
for (world, neighbor) in self.stars.edges():
distance = world.hex_distance(neighbor)
distanceMod = int(distance / 2)
CargoTradeIndex = int(round(math.sqrt(
max(world.eti_cargo - distanceMod, 0) *
max(neighbor.eti_cargo - distanceMod, 0))))
PassTradeIndex = int(round(math.sqrt(
max(world.eti_passenger - distanceMod, 0) *
max(neighbor.eti_passenger - distanceMod, 0))))
self.stars[world][neighbor]['CargoTradeIndex'] = CargoTradeIndex
self.stars[world][neighbor]['PassTradeIndex'] = PassTradeIndex
if CargoTradeIndex > 0:
world.eti_cargo_volume += math.pow(10, CargoTradeIndex) * 10
neighbor.eti_cargo_volume += math.pow(10, CargoTradeIndex) * 10
world.eti_worlds += 1
neighbor.eti_worlds += 1
if PassTradeIndex > 0:
world.eti_pass_volume += math.pow(10, PassTradeIndex) * 2.5
neighbor.eti_pass_volume += math.pow(10, PassTradeIndex) * 2.5
def read_routes(self, routes=None):
        # The capture bodies in this pattern are a best-effort reconstruction;
        # the serialized pattern was corrupted to "({1,})".
        route_regex = r"^(.+) \((.+) (\d\d\d\d)\) (.+) \((.+) (\d\d\d\d)\) (\{.*\})"
        routeline = re.compile(route_regex)
        path = os.path.join(self.output_path, 'ranges.txt')
        with open(path, "r") as f:
            for line in f:
                match = routeline.match(line)
                if match is None:
                    continue
                sec1 = match.group(2).strip()
                hex1 = match.group(3)
                sec2 = match.group(5).strip()
                hex2 = match.group(6)
                routeData = ast.literal_eval(match.group(7))
                world1 = self.sectors[sec1].find_world_by_pos(hex1)
                world2 = self.sectors[sec2].find_world_by_pos(hex2)
                self.ranges.add_edges_from([(world1, world2, routeData)])
def process_owned_worlds(self):
ow_names = os.path.join(self.output_path, 'owned-worlds-names.csv')
ow_list = os.path.join(self.output_path, 'owned-worlds-list.csv')
with codecs.open(ow_names, 'w+', 'utf-8') as f, codecs.open(ow_list, 'w+', 'utf-8') as g:
for world in self.stars:
if world.ownedBy == world:
continue
ownedBy = [star for star in self.stars.neighbors(world) \
if star.tl >= 9 and star.popCode >= 6 and \
star.port in 'ABC' and star.ownedBy == star and \
AllyGen.are_owned_allies(star.alg_code, world.alg_code)]
ownedBy.sort(reverse=True,
key=lambda star: star.popCode)
ownedBy.sort(reverse=True,
key=lambda star: star.importance - (star.hex_distance(world) - 1))
owner = None
if world.ownedBy is None:
owner = None
elif world.ownedBy == 'Mr':
owner = 'Mr'
elif world.ownedBy == 'Re':
owner = 'Re'
elif world.ownedBy == 'Px':
owner = 'Px'
elif len(world.ownedBy) > 4:
ownedSec = world.ownedBy[0:4]
ownedHex = world.ownedBy[5:]
owner = None
self.logger.debug(
"World {}@({},{}) owned by {} - {}".format(world, world.col, world.row, ownedSec, ownedHex))
if world.col < 4 and world.sector.spinward:
owner = world.sector.spinward.find_world_by_pos(ownedHex)
elif world.col > 28 and world.sector.trailing:
owner = world.sector.trailing.find_world_by_pos(ownedHex)
if world.row < 4 and owner is None and world.sector.coreward:
owner = world.sector.coreward.find_world_by_pos(ownedHex)
elif world.row > 36 and owner is None and world.sector.rimward:
owner = world.sector.rimward.find_world_by_pos(ownedHex)
                    # If we can't find the world in the sector next door, try this one
if owner is None:
owner = world.sector.find_world_by_pos(ownedHex)
elif len(world.ownedBy) == 4:
owner = world.sector.find_world_by_pos(world.ownedBy)
self.logger.debug("Worlds {} is owned by {}".format(world, owner))
ow_path_items = ['"{}"'.format(world), '"{}"'.format(owner)]
ow_path_items.extend(['"{}"'.format(item) for item in ownedBy[0:4]])
ow_path_world = ', '.join(ow_path_items)
f.write(ow_path_world + '\n')
ow_list_items = [
'"{}"'.format(world.sector.name[0:4]),
'"{}"'.format(world.position),
'"{}"'.format(owner)
]
ow_list_items.extend(['"O:{}"'.format(item.sec_pos(world.sector)) for item in ownedBy[0:4]])
ow_list_world = ', '.join(ow_list_items)
g.write(ow_list_world + '\n')
world.ownedBy = (owner, ownedBy[0:4])
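A hypothetical driver for the `Galaxy` class above, showing the intended call order; every argument value here is a placeholder (`pop_code` and `ru_calc` are passed straight through to `Star.parse_line_into_star`):

```python
galaxy = Galaxy(min_btn=13, max_jump=4, route_btn=8)
galaxy.read_sectors(['sectors/SpinwardMarches.sec'], pop_code='fixed', ru_calc='scaled')
galaxy.generate_routes('trade', reuse=10)  # or 'comm', 'xroute', 'owned', 'none'
galaxy.set_borders('erode', match='collapse')
galaxy.write_routes('trade')
```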
[file: kmeans.py | repo: sylvarant/learn-to-hash @ 1acd9293 | license: MIT]
import numpy as np
import torch
import random
import sys
import utils
import pdb
'''
Class for running Lloyd iterations for k-means and associated utility functions.
'''
chunk_size = 8192
num_iterations = 60
k = 10
device = utils.device
device_cpu = torch.device('cpu')
class FastKMeans:
def __init__(self, dataset, n_clusters, opt):
if isinstance(dataset, np.ndarray):
dataset = torch.from_numpy(dataset).to(utils.device)
self.centers, self.codes = self.build_kmeans(dataset, n_clusters)
self.centers_norm = torch.sum(self.centers**2, dim=0).view(1,-1).to(utils.device)
self.opt = opt
#self.k = opt.k
'''
Creates kmeans
'''
def build_kmeans(self, dataset, num_centers):
return build_kmeans(dataset, num_centers)
'''
Input: query. tensor, batched query.
Returns:
-indices of nearest centers
'''
def predict(self, query, k):
#query = query.to(utils.device)
if isinstance(query, np.ndarray):
query = torch.from_numpy(query).to(utils.device)
#self centers have dimension 1, torch.Size([100, 1024])
if hasattr(self, 'opt') and (self.opt.glove or self.opt.sift) and self.centers.size(1) > 512:
centers = self.centers.t()
idx = utils.dist_rank(query, k, data_y=centers, largest=False)
else:
q_norm = torch.sum(query ** 2, dim=1).view(-1, 1)
dist = q_norm + self.centers_norm - 2*torch.mm(query, self.centers)
if k > dist.size(1):
k = dist.size(1)
_, idx = torch.topk(dist, k=k, dim=1, largest=False)
#move predict to numpy
idx = idx.cpu().numpy()
return idx
def eval_kmeans(queries, centers, codes):
    # `answers` and `k` are read from module scope; the center and point
    # counts are derived from the inputs.
    num_centers = centers.size(1)
    num_points = codes.size(0)
centers_norms = torch.sum(centers ** 2, dim=0).view(1, -1)
queries_norms = torch.sum(queries ** 2, dim=1).view(-1, 1)
distances = torch.mm(queries, centers)
distances *= -2.0
distances += queries_norms
distances += centers_norms
codes = codes.to(device_cpu)
#counts of points per center. To compute # of candidates.
cnt = torch.zeros(num_centers, dtype=torch.long)
    bins = [[] for _ in range(num_centers)]  # independent lists, not aliases of one list
for i in range(num_points):
cnt[codes[i]] += 1 #don't recompute!!
bins[codes[i]].append(i)
num_queries = answers.size()[0]
for num_probes in range(1, num_centers + 1):
#ranking of indices to nearest centers
_, probes = torch.topk(distances, num_probes, dim=1, largest=False)
probes = probes.to(device_cpu)
total_score = 0
total_candidates = 0
for i in range(num_queries):
candidates = []
#set of predicted bins
tmp = set()
for j in range(num_probes):
candidates.append(cnt[probes[i, j]])
tmp.add(int(probes[i, j]))
overall_candidates = sum(candidates)
score = 0
for j in range(k):
if int(codes[answers[i, j]]) in tmp:
score += 1
total_score += score
total_candidates += overall_candidates
print(num_probes, float(total_score) / float(k * num_queries), float(total_candidates) / float(num_queries))
'''
Input:
-dataset
Returns:
-centers. MUST ensure num_centers < len(dataset)
-codes.
'''
def build_kmeans(dataset, num_centers):
num_points = dataset.size()[0]
if num_centers > num_points:
print('WARNING: num_centers > num_points! Setting num_centers = num_points')
num_centers = num_points
dimension = dataset.size()[1]
centers = torch.zeros(num_centers, dimension, dtype=torch.float).to(device)
used = torch.zeros(num_points, dtype=torch.long)
for i in range(num_centers):
while True:
cur_id = random.randint(0, num_points - 1)
if used[cur_id] > 0:
continue
used[cur_id] = 1
centers[i] = dataset[cur_id]
break
centers = torch.transpose(centers, 0, 1)
new_centers = torch.zeros(num_centers, dimension, dtype=torch.float).to(device)
cnt = torch.zeros(num_centers, dtype=torch.float).to(device)
all_ones = torch.ones(chunk_size, dtype=torch.float).to(device)
if num_points % chunk_size != 0:
all_ones_last = torch.ones(num_points % chunk_size, dtype=torch.float).to(device)
all_ones_cnt = torch.ones(num_centers, dtype=torch.float).to(device)
codes = torch.zeros(num_points, dtype=torch.long).to(device)
for it in range(num_iterations):
centers_norms = torch.sum(centers ** 2, dim=0).view(1, -1)
new_centers.fill_(0.0)
cnt.fill_(0.0)
for i in range(0, num_points, chunk_size):
begin = i
end = min(i + chunk_size, num_points)
dataset_piece = dataset[begin:end, :]
dataset_norms = torch.sum(dataset_piece ** 2, dim=1).view(-1, 1)
distances = torch.mm(dataset_piece, centers)
distances *= -2.0
distances += dataset_norms
distances += centers_norms
_, min_ind = torch.min(distances, dim=1)
codes[begin:end] = min_ind
new_centers.scatter_add_(0, min_ind.view(-1, 1).expand(-1, dimension), dataset_piece)
if end - begin == chunk_size:
cnt.scatter_add_(0, min_ind, all_ones)
else:
cnt.scatter_add_(0, min_ind, all_ones_last)
if it + 1 == num_iterations:
break
cnt = torch.where(cnt > 1e-3, cnt, all_ones_cnt)
new_centers /= cnt.view(-1, 1)
centers = torch.transpose(new_centers, 0, 1).clone()
#eval_kmeans(queries, centers, codes)
return centers, codes
if __name__ == '__main__':
dataset_numpy = np.load('dataset.npy')
queries_numpy = np.load('queries.npy')
answers_numpy = np.load('answers.npy')
dataset = torch.from_numpy(dataset_numpy).to(device)
queries = torch.from_numpy(queries_numpy).to(device)
answers = torch.from_numpy(answers_numpy)
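A usage sketch for `FastKMeans`; the shapes are arbitrary, and the `opt` stand-in only carries the flags that `predict` inspects:

```python
from types import SimpleNamespace

opt = SimpleNamespace(glove=False, sift=False)
data = np.random.rand(1000, 64).astype(np.float32)
km = FastKMeans(data, n_clusters=16, opt=opt)
nearest = km.predict(data[:5], k=3)  # (5, 3) array of nearest-center indices
```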
[file: built-in/TensorFlow/Official/nlp/Transformer_for_TensorFlow/noahnmt/utils/quantize_utils.py | repo: Ascend/modelzoo @ f018cfed | license: Apache-2.0]
# coding=utf-8
# Copyright Huawei Noah's Ark Lab.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synchronize replicas for model average training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
import tensorflow as tf
def clip_matmul_inputs(params, mode):
tf.logging.info("Add clip_by_value to inputs of MatMul")
value = tf.constant(float(params["clip_gemm.value"]))
batch_matmul = params["clip_gemm.batch_matmul"]
# if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
decay = tf.train.exponential_decay(
value,
(tf.minimum(global_step, params["clip_gemm.stop_decay_at"]) - params["clip_gemm.start_decay_step"]),
params["clip_gemm.decay_steps"],
params["clip_gemm.decay_rate"],
staircase=params["clip_gemm.staircase"])
decay = tf.maximum(decay, params["clip_gemm.min_value"])
decay_value = tf.cond(
global_step < params["clip_gemm.start_decay_step"],
lambda: value,
lambda: decay)
# else:
# decay_value = value
visited = {}
count = 0
graph = tf.get_default_graph()
graph_context = graph._get_control_flow_context()
for op in graph.get_operations():
if op.type == "MatMul" or (batch_matmul and op.type == "BatchMatMul"):
inputs = op.inputs
count += 1
assert len(inputs) == 2
for i, inp in enumerate(inputs):
assert inp.dtype == tf.float32
if inp.name in visited:
op._update_input(i, visited[inp.name])
else:
with tf.name_scope(op.name + "/input_%d"%i), tf.device(op.device):
# set context
context = inp.op._control_flow_context
if context:
context.Enter()
# create op
clip_inp = tf.clip_by_value(inp, -decay_value, decay_value)
# reset context
if context:
context.Exit()
# update matmul inputs
op._update_input(i, clip_inp)
visited[inp.name] = clip_inp
tf.logging.info("%d inputs of %d MatMuls are clipped" % (len(visited), count))
def quantize8(clip_value, batch_matmul=False):
tf.logging.info("Quantize 8-bits to inputs of MatMul")
visited = {}
count = 0
# assert_ops = []
for op in tf.get_default_graph().get_operations():
if op.type == "MatMul" or (batch_matmul and op.type == "BatchMatMul"):
inputs = op.inputs
count += 1
unquant_mul = 1.
# valid = False
assert len(op.inputs) == 2
context = op._control_flow_context
if context:
context.Enter()
quant_inputs = []
for i, inp in enumerate(inputs):
assert inp.dtype == tf.float32
# valid = True
if inp.name in visited:
quant_inp, quant_mul = visited[inp.name]
else:
with tf.name_scope(op.name + "/input_%d"%i), tf.device(op.device):
# find max value
max_value = tf.reduce_max(tf.abs(inp))
quant_mul = 127. / max_value
quant_inp = tf.round(inp * quant_mul)
# check overflow
quant_inp = tf.cast(tf.cast(quant_inp, tf.int8), dtype=tf.int32)
quant_inputs.append(quant_inp)
visited[inp.name] = (quant_inp, quant_mul)
# update input
# op._update_input(i, quant_inp)
unquant_mul *= 1. / quant_mul
# unquant outputs
# if valid:
assert len(quant_inputs) == 2
if op.type == "MatMul":
quant_output = tf.matmul(
quant_inputs[0], quant_inputs[1],
transpose_a=op.get_attr("transpose_a"),
transpose_b=op.get_attr("transpose_b"))
elif batch_matmul and op.type == "BatchMatMul":
quant_output = tf.matmul(
quant_inputs[0], quant_inputs[1],
adjoint_a=op.get_attr("adj_x"),
adjoint_b=op.get_attr("adj_y"))
else:
raise ValueError("Unkown op type!")
assert len(op.outputs) == 1
output = op.outputs[0]
consumers = output.consumers()
assert len(consumers) > 0
# cast for overflow checking
unquant_output = unquant_mul * tf.cast(quant_output, dtype=tf.float32)
tensors_modified_count = tf.contrib.graph_editor.reroute_ts(
[unquant_output], [output], can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
del output
del op
if context:
context.Exit()
tf.logging.info("%d inputs of %d MatMuls are quantized" % (len(visited), count))
def quantize16(clip_value, batch_matmul=False):
tf.logging.info("Quantize 16-bits to inputs of MatMul")
BITS = 10
if clip_value:
BITS = int(clip_value)
quant_mul = float(2 ** BITS)
unquant_mul = 1. / (quant_mul * quant_mul)
visited = {}
count = 0
for op in tf.get_default_graph().get_operations():
if op.type == "MatMul" or (batch_matmul and op.type == "BatchMatMul"):
inputs = op.inputs
count += 1
            # dequantize below with the shared unquant_mul = 1/(quant_mul**2) computed above
# valid = False
assert len(op.inputs) == 2
context = op._control_flow_context
if context:
context.Enter()
quant_inputs = []
for i, inp in enumerate(inputs):
assert inp.dtype == tf.float32
# valid = True
if inp.name in visited:
quant_inp = visited[inp.name]
else:
with tf.name_scope(op.name + "/input_%d"%i), tf.device(op.device):
quant_inp = tf.round(inp * quant_mul)
# check overflow
quant_inp = tf.cast(tf.cast(quant_inp, tf.int16), dtype=tf.int32)
visited[inp.name] = quant_inp
# update input
quant_inputs.append(quant_inp)
# op._update_input(i, quant_inp)
# unquant_mul *= 1. / quant_mul
# unquant outputs
# if valid:
assert len(quant_inputs) == 2
if op.type == "MatMul":
quant_output = tf.matmul(
quant_inputs[0], quant_inputs[1],
transpose_a=op.get_attr("transpose_a"),
transpose_b=op.get_attr("transpose_b"))
elif batch_matmul and op.type == "BatchMatMul":
quant_output = tf.matmul(
quant_inputs[0], quant_inputs[1],
adjoint_a=op.get_attr("adj_x"),
adjoint_b=op.get_attr("adj_y"))
else:
raise ValueError("Unkown op type!")
assert len(op.outputs) == 1
output = op.outputs[0]
consumers = output.consumers()
assert len(consumers) > 0
# cast for overflow checking
unquant_output = unquant_mul * tf.cast(quant_output, dtype=tf.float32)
# use graph editor
tensors_modified_count = tf.contrib.graph_editor.reroute_ts(
[unquant_output], [output], can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
del output
del op
if context:
context.Exit()
tf.logging.info("%d inputs of %d MatMuls are quantized" % (len(visited), count))
[file: tests/pip/data/stpkg/stpkg/submod.py | repo: mrshu/stash @ 773d15b8 | license: MIT]
# -*- coding: utf-8 -*-
"""submodule test"""
def main():
print("local pip test successfull!")
[file: tools/external/amitools/amitools/tools/fdtool.py | repo: pararaum/-alpine-amiga_examples @ fd81af3c | license: BSD-2-Clause]
#!/usr/bin/env python2.7
#
# fdtool <file.fd> ...
#
import sys
import argparse
import amitools.fd.FDFormat as FDFormat
# ----- dump -----
def dump(fname, fd, add_private):
print(fname)
print(" base: %s" % fd.get_base_name())
funcs = fd.get_funcs()
num = 1
for f in funcs:
if add_private or not f.is_private():
bias = f.get_bias()
print(" #%04d %5d 0x%04x %30s %s" % (num,bias,bias,f.get_name(),f.get_arg_str()))
num += 1
# ----- generate -----
def generate_python_code(fd, add_private):
funcs = fd.get_funcs()
for f in funcs:
if add_private or not f.is_private():
args = f.get_args()
if len(args)>0:
args = tuple(args)
else:
args = None
print " (%d, '%s', %s)," % (f.get_bias(),f.get_name(),args)
def generate_sasc_code(fname, fd, add_private, prefix=""):
funcs = fd.get_funcs()
fo = open(fname, "w")
for f in funcs:
if add_private or not f.is_private():
line = "__asm __saveds int %s%s(" % (prefix, f.get_name())
args = f.get_args()
            if args is not None:
for a in args:
line += "register __%s int %s" % (a[1],a[0])
if a != args[-1]:
line += ", "
else:
line += " void "
line += " )"
fo.write(line)
fo.write("{\n return 0;\n}\n\n")
fo.close()
# ----- main -----
def main():
# parse args
parser = argparse.ArgumentParser()
parser.add_argument('files', nargs='+')
parser.add_argument('-P', '--add-private', action='store_true', default=False, help="add private functions")
parser.add_argument('-p', '--gen-python', action='store_true', default=False, help="generate python code for vamos")
parser.add_argument('-f', '--gen-fd', action='store', default=None, help="generate a new fd file")
parser.add_argument('-c', '--gen-sasc', action='store', default=None, help="generate SAS C code file")
parser.add_argument('-E', '--prefix', action='store', default='', help="add prefix to functions in C")
args = parser.parse_args()
# main loop
files = args.files
for fname in files:
fd = FDFormat.read_fd(fname)
code_gen = False
if args.gen_python:
generate_python_code(fd, args.add_private)
code_gen = True
if args.gen_sasc:
generate_sasc_code(args.gen_sasc, fd, args.add_private, args.prefix)
code_gen = True
        if args.gen_fd is not None:
FDFormat.write_fd(args.gen_fd, fd, args.add_private)
code_gen = True
if not code_gen:
dump(fname, fd, args.add_private)
if __name__ == '__main__':
main()
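A programmatic sketch of the same flow that `main()` drives, using only the functions defined above (the `.fd` filename is made up):

```python
fd = FDFormat.read_fd("exec_library.fd")  # hypothetical input file
dump("exec_library.fd", fd, add_private=False)
generate_sasc_code("stubs.c", fd, add_private=False, prefix="my_")
```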
[file: venv/Lib/site-packages/pandas/core/arrays/sparse/accessor.py | repo: itsAbdulKhadar/Machine-Learning-with-Streamlit @ c8a0c7ca | license: MIT]
"""Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None):
self._parent = data
self._validate(data)
def _validate(self, data):
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
"""
    Accessor for Series with sparse values, including conversion from other
    sparse matrix data types.
"""
def _validate(self, data):
if not isinstance(data.dtype, SparseDtype):
raise AttributeError(self._validation_msg)
def _delegate_property_get(self, name, *args, **kwargs):
return getattr(self._parent.array, name)
def _delegate_method(self, name, *args, **kwargs):
if name == "from_coo":
return self.from_coo(*args, **kwargs)
elif name == "to_coo":
return self.to_coo(*args, **kwargs)
else:
raise ValueError
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a Series with sparse values from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : Series
A Series with sparse values.
Examples
--------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(
... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 2.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> ss = pd.Series.sparse.from_coo(A)
>>> ss
0 2 1.0
3 2.0
1 0 3.0
dtype: Sparse[float64, nan]
"""
from pandas import Series
from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
result = _coo_to_sparse_series(A, dense_index=dense_index)
result = Series(result.array, index=result.index, copy=False)
return result
def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a Series with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
>>> s.index = pd.MultiIndex.from_tuples(
... [
... (1, 2, "a", 0),
... (1, 2, "a", 1),
... (1, 1, "b", 0),
... (1, 1, "b", 1),
... (2, 1, "b", 0),
... (2, 1, "b", 1)
... ],
... names=["A", "B", "C", "D"],
... )
>>> s
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: float64
>>> ss = s.astype("Sparse")
>>> ss
A B C D
1 2 a 0 3.0
1 NaN
1 b 0 1.0
1 3.0
2 1 b 0 NaN
1 NaN
dtype: Sparse[float64, nan]
>>> A, rows, columns = ss.sparse.to_coo(
... row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
... )
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[0., 0., 1., 3.],
[3., 0., 0., 0.],
[0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo
A, rows, columns = _sparse_series_to_coo(
self._parent, row_levels, column_levels, sort_labels=sort_labels
)
return A, rows, columns
def to_dense(self):
"""
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
"""
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
"""
DataFrame accessor for sparse data.
.. versionadded:: 0.25.0
"""
def _validate(self, data):
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(self._validation_msg)
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
"""
Create a new DataFrame from a scipy sparse matrix.
.. versionadded:: 0.25.0
Parameters
----------
data : scipy.sparse.spmatrix
Must be convertible to csc format.
index, columns : Index, optional
Row and column labels to use for the resulting DataFrame.
Defaults to a RangeIndex.
Returns
-------
DataFrame
Each column of the DataFrame is stored as a
:class:`arrays.SparseArray`.
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.eye(3)
>>> pd.DataFrame.sparse.from_spmatrix(mat)
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas._libs.sparse import IntIndex
from pandas import DataFrame
data = data.tocsc()
index, columns = cls._prep_index(data, index, columns)
n_rows, n_columns = data.shape
# We need to make sure indices are sorted, as we create
# IntIndex with no input validation (i.e. check_integrity=False ).
# Indices may already be sorted in scipy in which case this adds
# a small overhead.
data.sort_indices()
indices = data.indices
indptr = data.indptr
array_data = data.data
dtype = SparseDtype(array_data.dtype, 0)
arrays = []
for i in range(n_columns):
sl = slice(indptr[i], indptr[i + 1])
idx = IntIndex(n_rows, indices[sl], check_integrity=False)
arr = SparseArray._simple_new(array_data[sl], idx, dtype)
arrays.append(arr)
return DataFrame._from_arrays(
arrays, columns=columns, index=index, verify_integrity=False
)
def to_dense(self):
"""
Convert a DataFrame with sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
DataFrame
A DataFrame with the same values stored as dense arrays.
Examples
--------
>>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
>>> df.sparse.to_dense()
A
0 0
1 1
2 0
"""
from pandas import DataFrame
data = {k: v.array.to_dense() for k, v in self._parent.items()}
return DataFrame(data, index=self._parent.index, columns=self._parent.columns)
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.25.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
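        Examples
        --------
        A minimal illustrative sketch; the exact scipy ``repr`` may vary by
        version, so the dense view is shown instead.
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
        >>> df.sparse.to_coo().toarray()
        array([[0],
               [1],
               [0],
               [1]])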
"""
import_optional_dependency("scipy")
from scipy.sparse import coo_matrix
dtype = find_common_type(self._parent.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self._parent):
s = self._parent[name]
row = s.array.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.array.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)
@property
def density(self) -> float:
"""
Ratio of non-sparse points to total (dense) data points.
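        Examples
        --------
        Illustrative, assuming the default fill_value of 0:
        >>> df = pd.DataFrame({"a": pd.arrays.SparseArray([0, 1, 0, 1])})
        >>> df.sparse.density
        0.5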
"""
return np.mean([column.array.density for _, column in self._parent.items()])
@staticmethod
def _prep_index(data, index, columns):
from pandas.core.indexes.api import ensure_index
import pandas.core.indexes.base as ibase
N, K = data.shape
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
if len(columns) != K:
raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
if len(index) != N:
raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
return index, columns
| 30.444149
| 86
| 0.545208
|
781e3f61481bb330956ed86d4f1b0335f6e98e22
| 3,633
|
py
|
Python
|
utils/spike-push-file-to-slot.py
|
SpudGunMan/spikedev
|
ab1860b6c85239c96172b3a4a4b96d13c9a65d3d
|
[
"MIT"
] | 23
|
2020-01-12T02:59:03.000Z
|
2022-03-16T14:00:56.000Z
|
utils/spike-push-file-to-slot.py
|
SpudGunMan/spikedev
|
ab1860b6c85239c96172b3a4a4b96d13c9a65d3d
|
[
"MIT"
] | 2
|
2020-04-20T02:16:59.000Z
|
2021-11-28T22:08:31.000Z
|
utils/spike-push-file-to-slot.py
|
SpudGunMan/spikedev
|
ab1860b6c85239c96172b3a4a4b96d13c9a65d3d
|
[
"MIT"
] | 11
|
2020-06-17T11:44:15.000Z
|
2022-01-29T20:39:52.000Z
|
#!/usr/bin/env python3
"""
Push a micropython file to one of SPIKE's program slots
"""
# standard libraries
import argparse
import ast
import logging
import os
import random
import string
import subprocess
import sys
from pprint import pformat
log = logging.getLogger(__name__)
MIN_SLOT = 0
MAX_SLOT = 19
def spike_push_file_to_slot(dev: str, filename: str, target_slot: int) -> bool:
"""
Push a micropython file to one of SPIKE's program slots
"""
# ampy requires root perms
if os.geteuid() != 0:
log.error("You must run this program using 'sudo'")
return False
if not os.path.exists(dev):
log.error(f"device '{dev}' is not connected")
return False
if target_slot < MIN_SLOT or target_slot > MAX_SLOT:
log.error(f"invalid target_slot {target_slot} (min {MIN_SLOT}, max {MAX_SLOT})")
return False
if not os.path.isfile(filename):
log.error(f"{filename} does not exist")
return False
# get the size of the file we are pushing
filesize = os.stat(filename).st_size
created_time = int(os.path.getctime(filename))
modified_time = int(os.path.getmtime(filename))
# get the current .slots file
SLOTS_FILENAME = "slots." + "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
subprocess.check_output(f"ampy --port {dev} get projects/.slots > {SLOTS_FILENAME}", shell=True)
with open(SLOTS_FILENAME, "r") as fh:
        slots = ast.literal_eval(fh.read())  # literal_eval is safer than eval for this dict literal
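    # illustrative .slots layout (assumed; the keys mirror what this script
    # writes back below):
    # {0: {'name': 'demo.py', 'created': 1590000000, 'modified': 1590000001,
    #      'size': 123, 'id': 4711}}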
# Is filename already in that slot? If so use the same ID
if target_slot in slots and slots[target_slot]["name"] == os.path.basename(filename):
filename_id = slots[target_slot]["id"]
# If not delete the old entry in that slot (if there is one) and
# pick a random ID
else:
if target_slot in slots:
# rm the current program in that slot
subprocess.check_output(f"ampy --port {dev} rm projects/{slots[target_slot]['id']}.py", shell=True)
del slots[target_slot]
# Pick a random ID but make sure we pick one that isn't already
# used in some other slot
used_IDs = [x["id"] for x in slots.values()]
while True:
filename_id = random.randint(1000000, 9999999999)
if filename_id not in used_IDs:
break
slots[target_slot] = {
"name": os.path.basename(filename),
"created": created_time,
"modified": modified_time,
"size": filesize,
"id": filename_id,
}
# Write the new .slots file
with open(SLOTS_FILENAME, "w") as fh:
fh.write(pformat(slots) + "\n")
# Copy the new .slots file to SPIKE
subprocess.check_output(f"sudo ampy --port {dev} put {SLOTS_FILENAME} projects/.slots", shell=True)
# Copy filename to SPIKE but name it based on its ID
subprocess.check_output(f"sudo ampy --port {dev} put {filename} projects/{filename_id}.py", shell=True)
os.unlink(SLOTS_FILENAME)
return True
if __name__ == "__main__":
# configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(filename)16s %(levelname)8s: %(message)s")
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, help="micropython file to push")
parser.add_argument("slot", type=int, help=f"SPIKE program slot ({MIN_SLOT}-{MAX_SLOT})")
parser.add_argument("--dev", type=str, default="/dev/ttyACM0", help="/dev/ttyXXXXX of SPIKE")
args = parser.parse_args()
if not spike_push_file_to_slot(args.dev, args.filename, args.slot):
sys.exit(1)
| 31.318966
| 111
| 0.657859
|
b89d4a22d52d6d572fe535f3a05c1f120e0e2da9
| 712
|
py
|
Python
|
functions/name_columns.py
|
kaymal/price-project
|
20c9166c65b10da6e9ecd0475172a30483853781
|
[
"MIT"
] | 2
|
2019-06-15T18:49:23.000Z
|
2019-06-20T12:21:42.000Z
|
functions/name_columns.py
|
kaymal/price-project
|
20c9166c65b10da6e9ecd0475172a30483853781
|
[
"MIT"
] | null | null | null |
functions/name_columns.py
|
kaymal/price-project
|
20c9166c65b10da6e9ecd0475172a30483853781
|
[
"MIT"
] | 2
|
2020-11-27T20:36:12.000Z
|
2020-12-09T23:10:28.000Z
|
def name_columns(data):
'''
Name columns with underscore(_) convention.
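    Illustrative doctest (a hypothetical two-column frame):
        >>> import pandas as pd
        >>> df = pd.DataFrame(columns=["Unit Price", "Qty."])
        >>> list(name_columns(df))
        ['unit_price', 'qty']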
'''
# Clean "\n"s from the column names
data.columns = data.columns.str.strip("\n")
# Make lowercase
data.columns = data.columns.str.lower()
# Replace space with underscore(_)
data.columns = data.columns.str.replace(" ", "_")
    # Remove "." (regex=False keeps the dot literal; otherwise it matches any character)
    data.columns = data.columns.str.replace(".", "", regex=False)
# Replace "_&_" with "&"
data.columns = data.columns.str.replace("_&_", "_")
# Replace "-" with "_"
data.columns = data.columns.str.replace("-", "_")
# Replace "\n" with ""
data.columns = data.columns.str.replace("\n", "")
return data.columns
| 27.384615
| 55
| 0.578652
|
6af136f59ad9e3c9611efeb048e889d3d37900f6
| 97
|
py
|
Python
|
tests/integration/testing_project/apps/polls/apps.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | 4
|
2019-07-26T12:42:42.000Z
|
2020-01-27T07:45:09.000Z
|
tests/integration/testing_project/apps/polls/apps.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | 37
|
2019-10-18T18:10:14.000Z
|
2020-01-31T07:46:23.000Z
|
tests/integration/testing_project/apps/polls/apps.py
|
swiatekm/django-test-migrations
|
628f650d2c076ce8ad44222d1a6c674ded90a48a
|
[
"MIT"
] | 1
|
2019-08-03T15:54:50.000Z
|
2019-08-03T15:54:50.000Z
|
from django.apps.config import AppConfig
class PollsConfig(AppConfig):
name = 'apps.polls'
| 16.166667
| 40
| 0.752577
|
ccdb6cf55b33aee80abcb8c333a4c8c6b1edad70
| 44,110
|
py
|
Python
|
python/ccxt/async_support/hitbtc.py
|
xCuri0/ccxt
|
bf3d10e0d3c1defb9aba9e81caa57cad0187fa5c
|
[
"MIT"
] | 4
|
2019-03-18T04:07:18.000Z
|
2019-09-15T07:31:56.000Z
|
python/ccxt/async_support/hitbtc.py
|
xCuri0/ccxt
|
bf3d10e0d3c1defb9aba9e81caa57cad0187fa5c
|
[
"MIT"
] | 1
|
2019-05-16T07:49:46.000Z
|
2019-05-16T07:49:46.000Z
|
python/ccxt/async_support/hitbtc.py
|
xCuri0/ccxt
|
bf3d10e0d3c1defb9aba9e81caa57cad0187fa5c
|
[
"MIT"
] | 2
|
2021-01-03T07:55:47.000Z
|
2021-04-16T11:10:53.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import TICK_SIZE
class hitbtc(Exchange):
def describe(self):
return self.deep_extend(super(hitbtc, self).describe(), {
'id': 'hitbtc',
'name': 'HitBTC',
'countries': ['HK'],
'rateLimit': 1500,
'version': '2',
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createDepositAddress': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': False,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTransactions': True,
'fetchWithdrawals': False,
'withdraw': True,
},
'timeframes': {
'1m': 'M1',
'3m': 'M3',
'5m': 'M5',
'15m': 'M15',
'30m': 'M30', # default
'1h': 'H1',
'4h': 'H4',
'1d': 'D1',
'1w': 'D7',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766555-8eaec20e-5edc-11e7-9c5b-6dc69fc42f5e.jpg',
'api': {
'public': 'https://api.hitbtc.com',
'private': 'https://api.hitbtc.com',
},
'www': 'https://hitbtc.com',
'referral': 'https://hitbtc.com/?ref_id=5a5d39a65d466',
'doc': [
'https://api.hitbtc.com',
'https://github.com/hitbtc-com/hitbtc-api/blob/master/APIv2.md',
],
'fees': [
'https://hitbtc.com/fees-and-limits',
'https://support.hitbtc.com/hc/en-us/articles/115005148605-Fees-and-limits',
],
},
'api': {
'public': {
'get': [
'symbol', # Available Currency Symbols
'symbol/{symbol}', # Get symbol info
'currency', # Available Currencies
'currency/{currency}', # Get currency info
'ticker', # Ticker list for all symbols
'ticker/{symbol}', # Ticker for symbol
'trades',
'trades/{symbol}', # Trades
'orderbook',
'orderbook/{symbol}', # Orderbook
'candles',
'candles/{symbol}', # Candles
],
},
'private': {
'get': [
'trading/balance', # Get trading balance
'order', # List your current open orders
'order/{clientOrderId}', # Get a single order by clientOrderId
'trading/fee/all', # Get trading fee rate
'trading/fee/{symbol}', # Get trading fee rate
'history/order', # Get historical orders
'history/trades', # Get historical trades
'history/order/{orderId}/trades', # Get historical trades by specified order
                        'account/balance',  # Get main account balance
                        'account/crypto/address/{currency}',  # Get deposit crypto address
'account/crypto/is-mine/{address}',
'account/transactions', # Get account transactions
'account/transactions/{id}', # Get account transaction by id
'sub-acc',
'sub-acc/acl',
'sub-acc/balance/{subAccountUserID}',
'sub-acc/deposit-address/{subAccountUserId}/{currency}',
],
'post': [
'order', # Create new order
                        'account/crypto/address/{currency}',  # Create new deposit crypto address
                        'account/crypto/withdraw',  # Withdraw crypto
'account/crypto/transfer-convert',
'account/transfer', # Transfer amount to trading
'sub-acc/freeze',
'sub-acc/activate',
'sub-acc/transfer',
],
'put': [
'order/{clientOrderId}', # Create new order
                        'account/crypto/withdraw/{id}',  # Commit withdraw crypto
'sub-acc/acl/{subAccountUserId}',
],
'delete': [
'order', # Cancel all open orders
'order/{clientOrderId}', # Cancel order
                        'account/crypto/withdraw/{id}',  # Rollback withdraw crypto
],
# outdated?
'patch': [
'order/{clientOrderId}', # Cancel Replace order
],
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.2 / 100,
},
},
'options': {
'defaultTimeInForce': 'FOK',
},
'commonCurrencies': {
'BCC': 'BCC', # initial symbol for Bitcoin Cash, now inactive
'BET': 'DAO.Casino',
'BOX': 'BOX Token',
'CPT': 'Cryptaur', # conflict with CPT = Contents Protocol https://github.com/ccxt/ccxt/issues/4920 and https://github.com/ccxt/ccxt/issues/6081
'GET': 'Themis',
'HSR': 'HC',
'IQ': 'IQ.Cash',
'LNC': 'LinkerCoin',
'PLA': 'PlayChip',
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'TV': 'Tokenville',
'USD': 'USDT',
'XPNT': 'PNT',
},
'exceptions': {
'504': RequestTimeout, # {"error":{"code":504,"message":"Gateway Timeout"}}
'1002': AuthenticationError, # {"error":{"code":1002,"message":"Authorization failed","description":""}}
'1003': PermissionDenied, # "Action is forbidden for self API key"
'2010': InvalidOrder, # "Quantity not a valid number"
'2001': BadSymbol, # "Symbol not found"
'2011': InvalidOrder, # "Quantity too low"
'2020': InvalidOrder, # "Price not a valid number"
'20002': OrderNotFound, # canceling non-existent order
'20001': InsufficientFunds, # {"error":{"code":20001,"message":"Insufficient funds","description":"Check that the funds are sufficient, given commissions"}}
},
'orders': {}, # orders cache / emulation
})
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_markets(self, params={}):
response = await self.publicGetSymbol(params)
#
# [
# {
# "id":"BCNBTC",
# "baseCurrency":"BCN",
# "quoteCurrency":"BTC",
# "quantityIncrement":"100",
# "tickSize":"0.00000000001",
# "takeLiquidityRate":"0.002",
# "provideLiquidityRate":"0.001",
# "feeCurrency":"BTC"
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'baseCurrency')
quoteId = self.safe_string(market, 'quoteCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
# bequant fix
if id.find('_') >= 0:
symbol = id
lot = self.safe_float(market, 'quantityIncrement')
step = self.safe_float(market, 'tickSize')
precision = {
'price': step,
'amount': lot,
}
taker = self.safe_float(market, 'takeLiquidityRate')
maker = self.safe_float(market, 'provideLiquidityRate')
feeCurrencyId = self.safe_string(market, 'feeCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
result.append(self.extend(self.fees['trading'], {
'info': market,
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'taker': taker,
'maker': maker,
'precision': precision,
'feeCurrency': feeCurrencyCode,
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': step,
'max': None,
},
'cost': {
'min': lot * step,
'max': None,
},
},
}))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetCurrency(params)
#
# [
# {
# "id":"XPNT",
# "fullName":"pToken",
# "crypto":true,
# "payinEnabled":true,
# "payinPaymentId":false,
# "payinConfirmations":9,
# "payoutEnabled":true,
# "payoutIsPaymentId":false,
# "transferEnabled":true,
# "delisted":false,
# "payoutFee":"26.510000000000",
# "precisionPayout":18,
# "precisionTransfer":8
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
decimals = self.safe_integer(currency, 'precisionTransfer', 8)
precision = 1 / math.pow(10, decimals)
code = self.safe_currency_code(id)
payin = self.safe_value(currency, 'payinEnabled')
payout = self.safe_value(currency, 'payoutEnabled')
transfer = self.safe_value(currency, 'transferEnabled')
active = payin and payout and transfer
if 'disabled' in currency:
if currency['disabled']:
active = False
type = 'fiat'
if ('crypto' in currency) and currency['crypto']:
type = 'crypto'
name = self.safe_string(currency, 'fullName')
result[code] = {
'id': id,
'code': code,
'type': type,
'payin': payin,
'payout': payout,
'transfer': transfer,
'info': currency,
'name': name,
'active': active,
'fee': self.safe_float(currency, 'payoutFee'), # todo: redesign
'precision': precision,
'limits': {
'amount': {
'min': 1 / math.pow(10, decimals),
'max': math.pow(10, decimals),
},
'price': {
'min': 1 / math.pow(10, decimals),
'max': math.pow(10, decimals),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
                        'max': math.pow(10, decimals),
},
},
}
return result
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = self.extend({
'symbol': market['id'],
}, self.omit(params, 'symbol'))
response = await self.privateGetTradingFeeSymbol(request)
#
# {
# takeLiquidityRate: '0.001',
# provideLiquidityRate: '-0.0001'
# }
#
return {
'info': response,
'maker': self.safe_float(response, 'provideLiquidityRate'),
'taker': self.safe_float(response, 'takeLiquidityRate'),
}
async def fetch_balance(self, params={}):
await self.load_markets()
type = self.safe_string(params, 'type', 'trading')
method = 'privateGet' + self.capitalize(type) + 'Balance'
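        # e.g. type='trading' resolves to privateGetTradingBalance and
        # type='account' to privateGetAccountBalance, matching the endpoint
        # map declared in describe()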
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available')
account['used'] = self.safe_float(balance, 'reserved')
result[code] = account
return self.parse_balance(result)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "timestamp":"2015-08-20T19:01:00.000Z",
# "open":"0.006",
# "close":"0.006",
# "min":"0.006",
# "max":"0.006",
# "volume":"0.003",
# "volumeQuote":"0.000018"
# }
#
return [
self.parse8601(self.safe_string(ohlcv, 'timestamp')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'max'),
self.safe_float(ohlcv, 'min'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if since is not None:
request['from'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = await self.publicGetCandlesSymbol(self.extend(request, params))
#
# [
# {"timestamp":"2015-08-20T19:01:00.000Z","open":"0.006","close":"0.006","min":"0.006","max":"0.006","volume":"0.003","volumeQuote":"0.000018"},
# {"timestamp":"2015-08-20T19:03:00.000Z","open":"0.006","close":"0.006","min":"0.006","max":"0.006","volume":"0.013","volumeQuote":"0.000078"},
# {"timestamp":"2015-08-20T19:06:00.000Z","open":"0.0055","close":"0.005","min":"0.005","max":"0.0055","volume":"0.003","volumeQuote":"0.0000155"},
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['limit'] = limit # default = 100, 0 = unlimited
response = await self.publicGetOrderbookSymbol(self.extend(request, params))
return self.parse_order_book(response, None, 'bid', 'ask', 'price', 'size')
def parse_ticker(self, ticker, market=None):
timestamp = self.parse8601(ticker['timestamp'])
symbol = None
if market is not None:
symbol = market['symbol']
baseVolume = self.safe_float(ticker, 'volume')
quoteVolume = self.safe_float(ticker, 'volumeQuote')
open = self.safe_float(ticker, 'open')
last = self.safe_float(ticker, 'last')
change = None
percentage = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
if open > 0:
percentage = change / open * 100
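        # illustrative arithmetic: open=100.0 and last=110.0 give change=10.0,
        # average=105.0 and percentage=10.0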
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTicker(params)
result = {}
for i in range(0, len(response)):
ticker = response[i]
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId)
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTickerSymbol(self.extend(request, params))
if 'message' in response:
raise ExchangeError(self.id + ' ' + response['message'])
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
# createMarketOrder
#
# { fee: "0.0004644",
# id: 386394956,
# price: "0.4644",
# quantity: "1",
# timestamp: "2018-10-25T16:41:44.780Z"}
#
# fetchTrades
#
# {id: 974786185,
# price: '0.032462',
# quantity: '0.3673',
# side: 'buy',
# timestamp: '2020-10-16T12:57:39.846Z'}
#
# fetchMyTrades
#
# {id: 277210397,
# clientOrderId: '6e102f3e7f3f4e04aeeb1cdc95592f1a',
# orderId: 28102855393,
# symbol: 'ETHBTC',
# side: 'sell',
# quantity: '0.002',
# price: '0.073365',
# fee: '0.000000147',
# timestamp: '2018-04-28T18:39:55.345Z'}
timestamp = self.parse8601(trade['timestamp'])
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
fee = None
feeCost = self.safe_float(trade, 'fee')
if feeCost is not None:
feeCurrencyCode = market['feeCurrency'] if market else None
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
orderId = self.safe_string(trade, 'clientOrderId')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'quantity')
cost = price * amount
side = self.safe_string(trade, 'side')
id = self.safe_string(trade, 'id')
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTime'] = since
response = await self.privateGetAccountTransactions(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# {
# id: 'd53ee9df-89bf-4d09-886e-849f8be64647',
# index: 1044718371,
# type: 'payout',
# status: 'success',
# currency: 'ETH',
# amount: '4.522683200000000000000000',
# createdAt: '2018-06-07T00:43:32.426Z',
# updatedAt: '2018-06-07T00:45:36.447Z',
# hash: '0x973e5683dfdf80a1fb1e0b96e19085b6489221d2ddf864daa46903c5ec283a0f',
# address: '0xC5a59b21948C1d230c8C54f05590000Eb3e1252c',
# fee: '0.00958',
# },
# {
# id: 'e6c63331-467e-4922-9edc-019e75d20ba3',
# index: 1044714672,
# type: 'exchangeToBank',
# status: 'success',
# currency: 'ETH',
# amount: '4.532263200000000000',
# createdAt: '2018-06-07T00:42:39.543Z',
# updatedAt: '2018-06-07T00:42:39.683Z',
# },
# {
# id: '3b052faa-bf97-4636-a95c-3b5260015a10',
# index: 1009280164,
# type: 'bankToExchange',
# status: 'success',
# currency: 'CAS',
# amount: '104797.875800000000000000',
# createdAt: '2018-05-19T02:34:36.750Z',
# updatedAt: '2018-05-19T02:34:36.857Z',
# },
# {
# id: 'd525249f-7498-4c81-ba7b-b6ae2037dc08',
# index: 1009279948,
# type: 'payin',
# status: 'success',
# currency: 'CAS',
# amount: '104797.875800000000000000',
# createdAt: '2018-05-19T02:30:16.698Z',
# updatedAt: '2018-05-19T02:34:28.159Z',
# hash: '0xa6530e1231de409cf1f282196ed66533b103eac1df2aa4a7739d56b02c5f0388',
# address: '0xd53ed559a6d963af7cb3f3fcd0e7ca499054db8b',
# }
#
# {
# "id": "4f351f4f-a8ee-4984-a468-189ed590ddbd",
# "index": 3112719565,
# "type": "withdraw",
# "status": "success",
# "currency": "BCHOLD",
# "amount": "0.02423133",
# "createdAt": "2019-07-16T16:52:04.494Z",
# "updatedAt": "2019-07-16T16:54:07.753Z"
# }
id = self.safe_string(transaction, 'id')
timestamp = self.parse8601(self.safe_string(transaction, 'createdAt'))
updated = self.parse8601(self.safe_string(transaction, 'updatedAt'))
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_float(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'hash')
fee = None
feeCost = self.safe_float(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = self.parse_transaction_type(self.safe_string(transaction, 'type'))
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def parse_transaction_status(self, status):
statuses = {
'pending': 'pending',
'failed': 'failed',
'success': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'payin': 'deposit',
'payout': 'withdrawal',
'withdraw': 'withdrawal',
}
return self.safe_string(types, type, type)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['sort'] = 'ASC'
request['from'] = self.iso8601(since)
response = await self.publicGetTradesSymbol(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
# their max accepted length is 32 characters
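        # a canonical 36-character UUID minus its four dashes is exactly
        # 32 hex characters, so the [0:32] slice below is a defensive no-op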
uuid = self.uuid()
parts = uuid.split('-')
clientOrderId = ''.join(parts)
clientOrderId = clientOrderId[0:32]
amount = float(amount)
request = {
'clientOrderId': clientOrderId,
'symbol': market['id'],
'side': side,
'quantity': self.amount_to_precision(symbol, amount),
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
else:
request['timeInForce'] = self.options['defaultTimeInForce']
response = await self.privatePostOrder(self.extend(request, params))
order = self.parse_order(response)
if order['status'] == 'rejected':
raise InvalidOrder(self.id + ' order was rejected by the exchange ' + self.json(order))
return order
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
# their max accepted length is 32 characters
uuid = self.uuid()
parts = uuid.split('-')
requestClientId = ''.join(parts)
requestClientId = requestClientId[0:32]
request = {
'clientOrderId': id,
'requestClientId': requestClientId,
}
if amount is not None:
request['quantity'] = self.amount_to_precision(symbol, amount)
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePatchOrderClientOrderId(self.extend(request, params))
return self.parse_order(response)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = await self.privateDeleteOrderClientOrderId(self.extend(request, params))
return self.parse_order(response)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'suspended': 'open',
'partiallyFilled': 'open',
'filled': 'closed',
'canceled': 'canceled',
'expired': 'failed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createMarketOrder
#
# {clientOrderId: "fe36aa5e190149bf9985fb673bbb2ea0",
# createdAt: "2018-10-25T16:41:44.780Z",
# cumQuantity: "1",
# id: "66799540063",
# quantity: "1",
# side: "sell",
# status: "filled",
# symbol: "XRPUSDT",
# timeInForce: "FOK",
# tradesReport: [{ fee: "0.0004644",
# id: 386394956,
# price: "0.4644",
# quantity: "1",
# timestamp: "2018-10-25T16:41:44.780Z"}],
# type: "market",
# updatedAt: "2018-10-25T16:41:44.780Z" }
#
created = self.parse8601(self.safe_string(order, 'createdAt'))
updated = self.parse8601(self.safe_string(order, 'updatedAt'))
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
amount = self.safe_float(order, 'quantity')
filled = self.safe_float(order, 'cumQuantity')
status = self.parse_order_status(self.safe_string(order, 'status'))
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
id = self.safe_string(order, 'clientOrderId')
clientOrderId = id
price = self.safe_float(order, 'price')
remaining = None
cost = None
if amount is not None:
if filled is not None:
remaining = amount - filled
if price is not None:
cost = filled * price
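        # illustrative: amount=2.0, filled=0.5 and price=10.0 give
        # remaining=1.5 and cost=5.0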
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
trades = self.safe_value(order, 'tradesReport')
fee = None
average = None
if trades is not None:
trades = self.parse_trades(trades, market)
feeCost = None
numTrades = len(trades)
tradesCost = 0
for i in range(0, numTrades):
if feeCost is None:
feeCost = 0
tradesCost = self.sum(tradesCost, trades[i]['cost'])
tradeFee = self.safe_value(trades[i], 'fee', {})
tradeFeeCost = self.safe_float(tradeFee, 'cost')
if tradeFeeCost is not None:
feeCost = self.sum(feeCost, tradeFeeCost)
cost = tradesCost
if (filled is not None) and (filled > 0):
average = cost / filled
if type == 'market':
if price is None:
price = average
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': market['quote'],
}
timeInForce = self.safe_string(order, 'timeInForce')
return {
'id': id,
'clientOrderId': clientOrderId, # https://github.com/ccxt/ccxt/issues/5674
'timestamp': created,
'datetime': self.iso8601(created),
'lastTradeTimestamp': updated,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'side': side,
'price': price,
'stopPrice': None,
'average': average,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'fee': fee,
'trades': trades,
'info': order,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = await self.privateGetHistoryOrder(self.extend(request, params))
numOrders = len(response)
if numOrders > 0:
return self.parse_order(response[0])
raise OrderNotFound(self.id + ' order ' + id + ' not found')
async def fetch_open_order(self, id, symbol=None, params={}):
await self.load_markets()
        # we use clientOrderId as the order id with this exchange intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = await self.privateGetOrderClientOrderId(self.extend(request, params))
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateGetOrder(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
if since is not None:
request['from'] = self.iso8601(since)
response = await self.privateGetHistoryOrder(self.extend(request, params))
parsedOrders = self.parse_orders(response, market)
orders = []
for i in range(0, len(parsedOrders)):
order = parsedOrders[i]
status = order['status']
if (status == 'closed') or (status == 'canceled'):
orders.append(order)
return self.filter_by_since_limit(orders, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'symbol': 'BTC/USD', # optional
# 'sort': 'DESC', # or 'ASC'
# 'by': 'timestamp', # or 'id' String timestamp by default, or id
# 'from': 'Datetime or Number', # ISO 8601
# 'till': 'Datetime or Number',
# 'limit': 100,
# 'offset': 0,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['from'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = await self.privateGetHistoryTrades(self.extend(request, params))
#
# [
# {
# "id": 9535486,
# "clientOrderId": "f8dbaab336d44d5ba3ff578098a68454",
# "orderId": 816088377,
# "symbol": "ETHBTC",
# "side": "sell",
# "quantity": "0.061",
# "price": "0.045487",
# "fee": "0.000002775",
# "timestamp": "2017-05-17T12:32:57.848Z"
# },
# {
# "id": 9535437,
# "clientOrderId": "27b9bfc068b44194b1f453c7af511ed6",
# "orderId": 816088021,
# "symbol": "ETHBTC",
# "side": "buy",
# "quantity": "0.038",
# "price": "0.046000",
# "fee": "-0.000000174",
# "timestamp": "2017-05-17T12:30:57.848Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
        # The id needed here is the exchange's own order id, not the
        # clientOrderId that is stored in the unified order's 'id' field.
        # To get the exchange's id you need to grab it from order['info']['id']
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'orderId': id,
}
response = await self.privateGetHistoryOrderOrderIdTrades(self.extend(request, params))
numOrders = len(response)
if numOrders > 0:
return self.parse_trades(response, market, since, limit)
raise OrderNotFound(self.id + ' order ' + id + ' not found, ' + self.id + '.fetchOrderTrades() requires an exchange-specific order id, you need to grab it from order["info"]["id"]')
async def create_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privatePostAccountCryptoAddressCurrency(self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
tag = self.safe_string(response, 'paymentId')
return {
'currency': currency,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.privateGetAccountCryptoAddressCurrency(self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
tag = self.safe_string(response, 'paymentId')
return {
'currency': currency['code'],
'address': address,
'tag': tag,
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': float(amount),
'address': address,
}
if tag:
request['paymentId'] = tag
response = await self.privatePostAccountCryptoWithdraw(self.extend(request, params))
return {
'info': response,
'id': response['id'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/api/' + self.version + '/'
query = self.omit(params, self.extract_params(path))
if api == 'public':
url += api + '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
url += self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif query:
body = self.json(query)
payload = self.encode(self.apiKey + ':' + self.secret)
auth = self.string_to_base64(payload)
headers = {
'Authorization': 'Basic ' + self.decode(auth),
'Content-Type': 'application/json',
}
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
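    # illustrative (values assumed): a public GET such as fetch_ticker('ETH/BTC')
    # resolves to https://api.hitbtc.com/api/2/public/ticker/ETHBTC with no auth
    # headers, while private endpoints send the
    # 'Authorization: Basic base64(apiKey:secret)' header built above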
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if code >= 400:
feedback = self.id + ' ' + body
# {"code":504,"message":"Gateway Timeout","description":""}
if (code == 503) or (code == 504):
raise ExchangeNotAvailable(feedback)
# fallback to default error handler on rate limit errors
# {"code":429,"message":"Too many requests","description":"Too many requests"}
if code == 429:
return
# {"error":{"code":20002,"message":"Order not found","description":""}}
if body[0] == '{':
if 'error' in response:
errorCode = self.safe_string(response['error'], 'code')
self.throw_exactly_matched_exception(self.exceptions, errorCode, feedback)
message = self.safe_string(response['error'], 'message')
if message == 'Duplicate clientOrderId':
raise InvalidOrder(feedback)
raise ExchangeError(feedback)
| 40.95636
| 189
| 0.503786
|
084d6cc25738cbc9633ef6817e4763ca23ce8676
| 3,724
|
py
|
Python
|
tests/test_models/test_dense_heads/test_sam_reppoints_head.py
|
williamcorsel/mmrotate
|
00a3b9af34c4e36c82616d98fdb91b468d4cfb34
|
[
"Apache-2.0"
] | 449
|
2022-02-18T08:26:58.000Z
|
2022-03-31T11:58:32.000Z
|
tests/test_models/test_dense_heads/test_sam_reppoints_head.py
|
williamcorsel/mmrotate
|
00a3b9af34c4e36c82616d98fdb91b468d4cfb34
|
[
"Apache-2.0"
] | 162
|
2022-02-18T09:54:46.000Z
|
2022-03-31T15:40:46.000Z
|
tests/test_models/test_dense_heads/test_sam_reppoints_head.py
|
williamcorsel/mmrotate
|
00a3b9af34c4e36c82616d98fdb91b468d4cfb34
|
[
"Apache-2.0"
] | 98
|
2022-02-18T08:28:48.000Z
|
2022-03-31T08:52:11.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import pytest
import torch
from mmrotate.models.dense_heads import SAMRepPointsHead
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
@pytest.mark.parametrize('reassign', [True, False])
def test_sam_head_loss(reassign):
"""Tests sam head loss when truth is empty and non-empty.
Args:
reassign (bool): If True, reassign samples.
"""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
init=dict(
assigner=dict(type='ConvexAssigner', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(type='SASAssigner', topk=3),
allowed_border=-1,
pos_weight=-1,
debug=False)))
self = SAMRepPointsHead(
num_classes=15,
in_channels=1,
feat_channels=1,
point_feat_channels=1,
stacked_convs=3,
num_points=9,
gradient_mul=0.3,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=2,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_init=dict(type='BCConvexGIoULoss', loss_weight=0.375),
loss_bbox_refine=dict(type='ConvexGIoULoss', loss_weight=1.0),
transform_method='rotrect',
topk=6,
anti_factor=0.75,
train_cfg=train_cfg).cuda()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, pts_out_init, pts_out_refine = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 5)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, pts_out_init, pts_out_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_pts_init_loss = sum(empty_gt_losses['loss_pts_init'])
empty_pts_refine_loss = sum(empty_gt_losses['loss_pts_refine'])
assert empty_cls_loss.item() != 0, 'cls loss should be non-zero'
assert empty_pts_init_loss.item() == 0, (
'there should be no pts_init loss when there are no true boxes')
assert empty_pts_refine_loss.item() == 0, (
'there should be no pts_refine loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874, 0.]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = self.loss(cls_scores, pts_out_init, pts_out_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_pts_init_loss = sum(one_gt_losses['loss_pts_init'])
onegt_pts_refine_loss = sum(one_gt_losses['loss_pts_refine'])
assert onegt_cls_loss.item() != 0, 'cls loss should be non-zero'
    assert onegt_pts_init_loss.item() >= 0, 'pts_init loss should be non-negative'
    assert onegt_pts_refine_loss.item() >= 0, (
        'pts_refine loss should be non-negative')
| 38
| 79
| 0.615199
|
870902ab628d69425b587b76762dc224382e3a24
| 629
|
py
|
Python
|
tests/unit/expression/test_quantified.py
|
rinslow/regene
|
b1aa6cf9dcd436d547e9d4276bb850644dfc53aa
|
[
"MIT"
] | 2
|
2018-02-23T23:12:44.000Z
|
2019-04-28T03:20:17.000Z
|
tests/unit/expression/test_quantified.py
|
rinslow/regene
|
b1aa6cf9dcd436d547e9d4276bb850644dfc53aa
|
[
"MIT"
] | 2
|
2018-02-27T21:36:24.000Z
|
2018-03-09T11:28:44.000Z
|
tests/unit/expression/test_quantified.py
|
rinslow/regene
|
b1aa6cf9dcd436d547e9d4276bb850644dfc53aa
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from regene.expression import Exactly
from regene.expression.quantified import Quantified
from regene.expression.string import String
class QuantifiedTest(TestCase):
def test_str(self):
assert Quantified(String("1"), Exactly(1)) == "1"
def test_quantifiers(self):
assert Quantified(String("A"), Exactly(2)) == "AA"
def test_group_within_a_group(self):
assert Quantified(
Quantified(String("A"), Exactly(1)),
Exactly(1)) == "A"
def test_multiplication(self):
assert (Quantified(String("1"), Exactly(1)) * 2) * 2 == "1111"
| 28.590909
| 70
| 0.664547
|
df5eb74500ffa3f50a4e462387522948d72bf0eb
| 304,872
|
py
|
Python
|
pysnmp-with-texts/StorageManagement-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/StorageManagement-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/StorageManagement-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module StorageManagement-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/StorageManagement-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:14:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, TimeTicks, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, ModuleIdentity, ObjectIdentity, IpAddress, MibIdentifier, NotificationType, NotificationType, enterprises, Gauge32, Counter32, Bits, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "TimeTicks", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "ModuleIdentity", "ObjectIdentity", "IpAddress", "MibIdentifier", "NotificationType", "NotificationType", "enterprises", "Gauge32", "Counter32", "Bits", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DellStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("nonCritical", 4), ("critical", 5), ("nonRecoverable", 6))
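# illustrative mapping drawn from the NamedValues above: DellStatus(3) renders
# as 'ok' and DellStatus(5) as 'critical'; integers outside 1..6 violate the
# SingleValueConstraint subtype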
dell = MibIdentifier((1, 3, 6, 1, 4, 1, 674))
storage = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1))
storageManagement = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20))
softwareVersion = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: softwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: softwareVersion.setDescription('Version number of the storage management component of the systems management software.')
globalStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("critical", 1), ("warning", 2), ("normal", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: globalStatus.setStatus('mandatory')
if mibBuilder.loadTexts: globalStatus.setDescription('Global health information for the subsystem managed by the Storage Management software. This global status is customized for HP OpenView. Other applications should refer to the agentSystemGlobalStatus entry in the globalData object group. Possible values: 1: Critical 2: Warning 3: Normal 4: Unknown')
softwareManufacturer = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: softwareManufacturer.setStatus('mandatory')
if mibBuilder.loadTexts: softwareManufacturer.setDescription('Manufacturer information for the Storage Management software.')
softwareProduct = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: softwareProduct.setStatus('mandatory')
if mibBuilder.loadTexts: softwareProduct.setDescription('Product information for the Storage Management software.')
softwareDescription = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: softwareDescription.setStatus('mandatory')
if mibBuilder.loadTexts: softwareDescription.setDescription('Product Description for the Storage Management software.')
storageManagementInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 100))
displayName = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 100, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: displayName.setStatus('mandatory')
if mibBuilder.loadTexts: displayName.setDescription('Name of this management software for display purposes.')
description = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 100, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: description.setStatus('mandatory')
if mibBuilder.loadTexts: description.setDescription('A short description of this management software.')
agentVendor = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 100, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentVendor.setStatus('mandatory')
if mibBuilder.loadTexts: agentVendor.setDescription('The name of the management software manufacturer.')
agentVersion = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 100, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentVersion.setStatus('obsolete')
if mibBuilder.loadTexts: agentVersion.setDescription('This entry is obsolete. Refer to softwareVersion.')
globalData = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110))
agentSystemGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("warning", 2), ("nonCriticalError", 3), ("failure", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentSystemGlobalStatus.setStatus('obsolete')
if mibBuilder.loadTexts: agentSystemGlobalStatus.setDescription('This entry is obsolete. Please use the value: agentGlobalSystemStatus. Note: Enumerated values have changed.')
agentLastGlobalStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("warning", 2), ("nonCriticalError", 3), ("failure", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalStatus.setStatus('obsolete')
if mibBuilder.loadTexts: agentLastGlobalStatus.setDescription('This entry is obsolete. Please use the value: agentLastGlobalSystemStatus. Note: Enumerated values have changed.')
agentTimeStamp = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: agentTimeStamp.setDescription('The last time that the agent values have been updated. Universal time in sec since UTC 1/1/70.')
agentGetTimeout = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGetTimeout.setStatus('mandatory')
if mibBuilder.loadTexts: agentGetTimeout.setDescription('Suggested timeout value in milliseconds for how long the SNMP getter should wait while attempting to poll the SNMP agent.')
agentModifiers = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentModifiers.setStatus('mandatory')
if mibBuilder.loadTexts: agentModifiers.setDescription('Agent functional modifiers, when set the modifier is active. Bit definitions: Bit 3: agent in debug mode. All other bits are product specific ')
agentRefreshRate = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentRefreshRate.setStatus('mandatory')
if mibBuilder.loadTexts: agentRefreshRate.setDescription('The rate, given in seconds, at which the cached data for SNMP is refreshed. The default value is 300 seconds, or 5 minutes.')
agentHostname = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentHostname.setStatus('obsolete')
if mibBuilder.loadTexts: agentHostname.setDescription('This entry is obsolete for Storage Management.')
agentIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentIPAddress.setStatus('obsolete')
if mibBuilder.loadTexts: agentIPAddress.setDescription('This entry is obsolete for Storage Management.')
agentSoftwareStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("databaseUp", 1), ("databaseDown", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentSoftwareStatus.setStatus('obsolete')
if mibBuilder.loadTexts: agentSoftwareStatus.setDescription('This entry is obsolete for Storage Management.')
agentSnmpVersion = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentSnmpVersion.setStatus('obsolete')
if mibBuilder.loadTexts: agentSnmpVersion.setDescription('This entry is obsolete. Refer to softwareVersion.')
agentMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentMibVersion.setStatus('mandatory')
if mibBuilder.loadTexts: agentMibVersion.setDescription('Version of the Storage Management MIB.')
agentManagementSoftwareURLName = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentManagementSoftwareURLName.setStatus('mandatory')
if mibBuilder.loadTexts: agentManagementSoftwareURLName.setDescription('The Universal Resource Locator (URL) of the systems management software.')
agentGlobalSystemStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 13), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentGlobalSystemStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentGlobalSystemStatus.setDescription('Global health information for the subsystem managed by the Storage Management software. This global status should be used by applications other than HP OpenView. HP OpenView should refer to the globalStatus in the root level object group. This is a rollup for the entire agent including any monitored devices. The status is intended to give initiative to a snmp monitor to get further data when this status is abnormal. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
agentLastGlobalSystemStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 14), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentLastGlobalSystemStatus.setStatus('mandatory')
if mibBuilder.loadTexts: agentLastGlobalSystemStatus.setDescription('The previous global status of the system managed by the Storage Management software. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
agentSmartThermalShutdown = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 110, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("notApplicable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentSmartThermalShutdown.setStatus('mandatory')
if mibBuilder.loadTexts: agentSmartThermalShutdown.setDescription('Indicates the status of smart thermal shutdown for PV220S and PV221S enclosures. Possible values: 1: Enabled 2: Disabled 3: Not Applicable')
physicalDevices = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130))
channelTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2), )
if mibBuilder.loadTexts: channelTable.setStatus('mandatory')
if mibBuilder.loadTexts: channelTable.setDescription('A table of controller channels. The number of entries is related to the number of channels supported by a RAID controller. Perc2 RAID controller supports a max of 4 channels per controller. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
channelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1), ).setIndexNames((0, "StorageManagement-MIB", "channelNumber"))
if mibBuilder.loadTexts: channelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: channelEntry.setDescription('An entry in the channel table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
channelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelNumber.setStatus('mandatory')
if mibBuilder.loadTexts: channelNumber.setDescription('Instance number of this channel entry.')
channelName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelName.setStatus('mandatory')
if mibBuilder.loadTexts: channelName.setDescription('The name of the channel as represented in Storage Management. The name will include the word channel and the instance. For example: Channel 1')
channelState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("degraded", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelState.setStatus('mandatory')
if mibBuilder.loadTexts: channelState.setDescription('The current condition of the channel. Possible states: 0: Unknown 1: Ready - The I/O has resumed. 2: Failed 3: Online 4: Offline - The I/O has paused. 6: Degraded ')
channelSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: channelSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
channelTermination = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("wide", 1), ("narrow", 2), ("notTerminated", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelTermination.setStatus('mandatory')
if mibBuilder.loadTexts: channelTermination.setDescription('The type of SCSI termination on this channel. Termination is required for proper operation of this channel. 1: Wide Termination (16 bit) 2: Narrow Termination (8 bit) 3: Not Terminated')
channelSCSIID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelSCSIID.setStatus('mandatory')
if mibBuilder.loadTexts: channelSCSIID.setDescription('The SCSI ID of the controller to which the channel belongs.')
channelRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 7), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: channelRollUpStatus.setDescription('Severity of the channel state. This is the combined status of the channel and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
channelComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 8), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: channelComponentStatus.setDescription('The status of the channel itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
channelNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: channelNexusID.setDescription('Durable unique ID for this channel.')
channelDataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelDataRate.setStatus('mandatory')
if mibBuilder.loadTexts: channelDataRate.setDescription('The data rate (SCSI speed) of the channel. Example: Ultra 320M SCSI')
channelBusType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 7, 8, 9))).clone(namedValues=NamedValues(("scsi", 1), ("ide", 2), ("fibreChannel", 3), ("ssa", 4), ("usb", 6), ("sata", 7), ("sas", 8), ("pcie", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: channelBusType.setStatus('mandatory')
if mibBuilder.loadTexts: channelBusType.setDescription('The bus type of the channel. Possible values: 1: SCSI 2: IDE 3: Fibre Channel 4: SSA 6: USB 7: SATA 8: SAS 9: PCIe')
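# --- Illustrative helper (not generated from the MIB) -------------------------
# A sketch of walking the channelTable defined above with the pysnmp high-level
# API. The target host and community string are assumptions about a reachable
# agent; this module itself performs no I/O, and the helper is never called here.
def _walkChannelTable(host, community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for errInd, errStat, errIdx, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('StorageManagement-MIB', 'channelName')),
            ObjectType(ObjectIdentity('StorageManagement-MIB', 'channelState')),
            lexicographicMode=False):
        if errInd or errStat:
            break  # stop on any SNMP error rather than raising
        yield varBinds  # one (channelName, channelState) pair per channel row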
enclosureTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3), )
if mibBuilder.loadTexts: enclosureTable.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureTable.setDescription('A table of managed Enclosures. The number of entries is related to the number of enclosures discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
enclosureEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1), ).setIndexNames((0, "StorageManagement-MIB", "enclosureNumber"))
if mibBuilder.loadTexts: enclosureEntry.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureEntry.setDescription('An entry in the Enclosure table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
enclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureNumber.setDescription('Instance number of the enclosure entry.')
enclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureName.setDescription("The enclosure's name as represented in Storage Management.")
enclosureVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureVendor.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureVendor.setDescription("The enclosure's (re)seller's name.")
enclosureState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("degraded", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureState.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureState.setDescription('The current condition of the enclosure (which includes any devices connected to it.) Possible states: 0: Unknown 1: Ready 2: Failed 3: Online 4: Offline 6: Degraded')
enclosureSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
enclosureID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureID.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureID.setDescription('Represents unique id for an enclosure.')
enclosureProcessorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureProcessorVersion.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureProcessorVersion.setDescription('This entry is obsolete for Storage Management.')
enclosureServiceTag = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureServiceTag.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureServiceTag.setDescription('Enclosure identification used when consulting customer support.')
enclosureAssetTag = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureAssetTag.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureAssetTag.setDescription('User definable asset tag for the enclosure.')
enclosureAssetName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureAssetName.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureAssetName.setDescription('User definable asset name of the enclosure.')
enclosureSplitBusPartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSplitBusPartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureSplitBusPartNumber.setDescription("The enclosure's split bus part number.")
enclosureProductID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureProductID.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureProductID.setDescription("The enclosure's product identification. This also corresponds to the enclosure type. ")
enclosureKernelVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureKernelVersion.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureKernelVersion.setDescription('This entry is obsolete for Storage Management. Refer to enclosureFirmwareVersion for the firmware version of the enclosure.')
enclosureESM1PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureESM1PartNumber.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureESM1PartNumber.setDescription('This entry is obsolete for Storage Management.')
enclosureESM2PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 15), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureESM2PartNumber.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureESM2PartNumber.setDescription('This entry is obsolete for Storage Management.')
enclosureType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("internal", 1), ("dELLPV200SPV201S", 2), ("dELLPV210SPV211S", 3), ("dELLPV220SPV221S", 4), ("dELLPV660F", 5), ("dELLPV224F", 6), ("dELLPV660F224F", 7), ("md1000", 8), ("md1120", 9), ("md1200", 10), ("md1220", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureType.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureType.setDescription('The type of this enclosure. Possible values: 1: Internal 2: Dell PV200S (PV201S) 3: Dell PV210S (PV211S) 4: Dell PV220S (PV221S) 5: Dell PV660F 6: Dell PV224F 7: Dell PV660F / PV224F 8: Dell MD1000 9: Dell MD1120 10: Dell MD1200 11: Dell MD1220')
enclosureProcessor2Version = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 17), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureProcessor2Version.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureProcessor2Version.setDescription('This entry is obsolete for Storage Management.')
enclosureConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("joined", 1), ("splitBus", 2), ("clustered", 3), ("unified", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureConfig.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureConfig.setDescription("The current configuration of the enclosure's backplane. Possible values: 1: Joined 2: Split Bus 3: Clustered 4: Unified")
enclosureChannelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureChannelNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureChannelNumber.setDescription('The channel number, or bus, to which the enclosure is connected.')
enclosureAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureAlarm.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureAlarm.setDescription("The current status of the enclosure's alarm (PV220S and PV221S only). Possible values: 1: Disabled 2: Enabled")
enclosureBackplanePartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 21), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureBackplanePartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureBackplanePartNumber.setDescription("The part number of the enclosure's backplane.")
enclosureSCSIID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSCSIID.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureSCSIID.setDescription('The SCSI ID of the controller to which this enclosure is attached.')
enclosureRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 23), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureRollUpStatus.setDescription('Severity of the enclosure state. This is the combined status of the enclosure and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
enclosureComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 24), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureComponentStatus.setDescription('The status of the enclosure itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
enclosureNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 25), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureNexusID.setDescription('Durable unique ID for this enclosure.')
enclosureFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 26), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureFirmwareVersion.setDescription('The firmware version of the enclosure.')
enclosureSCSIRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 27), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSCSIRate.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureSCSIRate.setDescription('Actual SCSI rate in the enclosure. ')
enclosurePartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 28), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosurePartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosurePartNumber.setDescription('The part number of the enclosure. ')
enclosureSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 29), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSerialNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureSerialNumber.setDescription('Serial number of the enclosure. ')
enclosureSASAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 30), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureSASAddress.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureSASAddress.setDescription('The specified SAS address if this is a SAS enclosure. ')
enclosureOccupiedSlotCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 31), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureOccupiedSlotCount.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureOccupiedSlotCount.setDescription('Shows the number of physical disk slots occupied in a storage enclosure. Note: A value of 9999 indicates feature not available.')
enclosureTotalSlots = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 32), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureTotalSlots.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureTotalSlots.setDescription('Shows the total number of physical slots in a storage enclosure; it includes total count of occupied and empty slots. Note: A value of 9999 indicates feature not available. ')
enclosureEmptySlotCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 33), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureEmptySlotCount.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureEmptySlotCount.setDescription('Shows the number of empty physical disk slots in a storage enclosure. Note: A value of 9999 indicates feature not available. ')
enclosureExpressServiceCode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 3, 1, 34), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureExpressServiceCode.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureExpressServiceCode.setDescription('Express Service Code (ESC) is the base10-converted numerical value of the service tag.')
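# --- Illustrative helper (not generated from the MIB) -------------------------
# Per the description above, the Express Service Code is the base10 reading of
# the (base36) service tag, so the conversion is a single int() call. The
# sample tag below is made up purely for illustration.
def _expressServiceCode(serviceTag):
    return int(serviceTag.strip(), 36)

# e.g. _expressServiceCode('ABC1234') == 22453156048  (hypothetical tag)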
arrayDiskTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4), )
if mibBuilder.loadTexts: arrayDiskTable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskTable.setDescription('A table of managed Array Disks. The number of entries is related to the number of Array Disks discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
arrayDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1), ).setIndexNames((0, "StorageManagement-MIB", "arrayDiskNumber"))
if mibBuilder.loadTexts: arrayDiskEntry.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEntry.setDescription('An entry in the Array Disk table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
arrayDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskNumber.setDescription('Instance number of this array disk entry.')
arrayDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskName.setDescription('The name of the array disk as represented in Storage Management.')
arrayDiskVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskVendor.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskVendor.setDescription("The array disk's manufacturer's name.")
arrayDiskState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 7, 11, 13, 14, 15, 22, 23, 24, 25, 26, 28, 34, 35, 39, 40, 41, 53, 56))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("degraded", 6), ("recovering", 7), ("removed", 11), ("non-raid", 13), ("notReady", 14), ("resynching", 15), ("replacing", 22), ("spinningDown", 23), ("rebuild", 24), ("noMedia", 25), ("formatting", 26), ("diagnostics", 28), ("predictiveFailure", 34), ("initializing", 35), ("foreign", 39), ("clear", 40), ("unsupported", 41), ("incompatible", 53), ("readOnly", 56)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskState.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskState.setDescription('The current condition of the array disk. Possible states: 0: Unknown 1: Ready - Available for use, but no RAID configuration has been assigned. 2: Failed - Not operational. 3: Online - Operational. RAID configuration has been assigned. 4: Offline - The drive is not available to the RAID controller. 6: Degraded - Refers to a fault-tolerant array/virtual disk that has a failed disk. 7: Recovering - Refers to state of recovering from bad blocks on disks. 11: Removed - Indicates that array disk has been removed. 13: Non-RAID - Indicates that array disk is not a RAID capable disk 14: Not Ready - Applicable for PCIeSSD devices indicating that the device is in locked state 15: Resynching - Indicates one of the following types of disk operations: Transform Type, Reconfiguration, and Check Consistency. 22: Replacing - Indicates copyback operation is in progress. 23: Spinning Down - Transit state when the physical disk is spinning down for physical disk power management. 24: Rebuild 25: No Media - CD-ROM or removable disk has no media. 26: Formatting - In the process of formatting. 28: Diagnostics - Diagnostics are running. 34: Predictive failure 35: Initializing: Applies only to virtual disks on PERC, PERC 2/SC, and PERC 2/DC controllers. 39: Foreign 40: Clear 41: Unsupported 53: Incompatible 56: Read Only - Applicable for PCIeSSD devices. Indicates that device has reached read-only state ')
arrayDiskSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: arrayDiskSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
arrayDiskProductID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskProductID.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskProductID.setDescription('The model number of the array disk.')
arrayDiskSerialNo = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSerialNo.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskSerialNo.setDescription("The array disk's unique identification number from the manufacturer. ")
arrayDiskRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskRevision.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskRevision.setDescription('The firmware version of the array disk.')
arrayDiskEnclosureID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureID.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureID.setDescription('The SCSI ID of the enclosure processor to which this array disk belongs.')
arrayDiskChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannel.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannel.setDescription('The bus to which this array disk is connected.')
arrayDiskLengthInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLengthInMB.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLengthInMB.setDescription('The size in megabytes of the array disk. If this size is 0, it is smaller than a megabyte.')
arrayDiskLengthInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLengthInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLengthInBytes.setDescription('The size of the array disk in bytes that is less than a megabyte. This size plus the arrayDiskLengthInMB is the total size of the array disk.')
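# --- Illustrative helper (not generated from the MIB) -------------------------
# Per the two descriptions above, the full capacity is the megabyte count plus
# the sub-megabyte remainder. Whether a "megabyte" here means 2**20 or 10**6
# bytes is not stated in this MIB; 2**20 is assumed below. The same pattern
# applies to the other InMB/InBytes column pairs in this table.
def _totalSizeInBytes(lengthInMB, lengthInBytes):
    return int(lengthInMB) * (2 ** 20) + int(lengthInBytes)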
arrayDiskLargestContiguousFreeSpaceInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLargestContiguousFreeSpaceInMB.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLargestContiguousFreeSpaceInMB.setDescription('The size in megabytes of the largest contiguous free space on the array disk. If this size is 0, it is less than a megabyte.')
arrayDiskLargestContiguousFreeSpaceInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLargestContiguousFreeSpaceInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLargestContiguousFreeSpaceInBytes.setDescription('The size of the largest contiguous free space on this array disk in bytes that is less than a megabyte. This size plus the arrayDiskLargestContiguousFreeSpaceInMB is the total size of the largest contiguous free space on the array disk.')
arrayDiskTargetID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskTargetID.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskTargetID.setDescription('The SCSI target ID to which this array disk is assigned. ')
arrayDiskLunID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLunID.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLunID.setDescription("The array disk's logical unit number. ")
arrayDiskUsedSpaceInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskUsedSpaceInMB.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskUsedSpaceInMB.setDescription('The amount in megabytes of the used space on the array disk. If this size is 0, it is smaller than a megabyte.')
arrayDiskUsedSpaceInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskUsedSpaceInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskUsedSpaceInBytes.setDescription('The size in bytes of the used space on the array disk that is smaller than a megabyte. This size plus the arrayDiskUsedSpaceInMB is the total amount of used space on the array disk.')
arrayDiskFreeSpaceInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskFreeSpaceInMB.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskFreeSpaceInMB.setDescription('The amount in megabytes of the free space on the array disk. If this size is 0, it is smaller than a megabyte.')
arrayDiskFreeSpaceInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskFreeSpaceInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskFreeSpaceInBytes.setDescription('The size in bytes of the free space on the array disk that is smaller than a megabyte. This size plus the arrayDiskFreeSpaceInMB is the total amount of free space on the array disk.')
arrayDiskBusType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 7, 8, 9))).clone(namedValues=NamedValues(("scsi", 1), ("ide", 2), ("fibre", 3), ("ssa", 4), ("usb", 6), ("sata", 7), ("sas", 8), ("pcie", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskBusType.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskBusType.setDescription('The bus type of the array disk. Possible values: 1: SCSI 2: IDE 3: Fibre Channel 4: SSA 6: USB 7: SATA 8: SAS 9: PCIe')
arrayDiskSpareState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))).clone(namedValues=NamedValues(("memberVD", 1), ("memberDG", 2), ("globalHotSpare", 3), ("dedicatedHotSpare", 4), ("notASpare", 5), ("notApplicable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSpareState.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskSpareState.setDescription('The status of the array disk as a spare. Possible states: 1 : disk is a member of a virtual disk 2 : disk is a member of a disk group 3 : disk is a global hot spare 4 : disk is a dedicated hot spare 5 : not a spare 99: not applicable')
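# --- Illustrative helper (not generated from the MIB) -------------------------
# A sketch of testing arrayDiskSpareState for hot-spare membership; the codes
# are taken directly from the description above.
def _isHotSpare(spareState):
    return int(spareState) in (3, 4)  # globalHotSpare(3) or dedicatedHotSpare(4)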
arrayDiskRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 23), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskRollUpStatus.setDescription('Severity of the array disk state. This is the combined status of the array disk and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
arrayDiskComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 24), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskComponentStatus.setDescription('The status of the array disk itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
arrayDiskDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 25), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskDeviceName.setDescription('The operating system device name for this disk. This property is only applicable to array disks attached to a RAID controller.')
arrayDiskNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 26), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskNexusID.setDescription('Durable unique ID for this array disk.')
arrayDiskPartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 27), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskPartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskPartNumber.setDescription('The part number of the disk. ')
arrayDiskSASAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 28), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSASAddress.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskSASAddress.setDescription('The specified SAS address if this is a SAS disk. ')
arrayDiskNegotiatedSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskNegotiatedSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskNegotiatedSpeed.setDescription('The speed at which the drive is actually running in MPS (megabytes per second). ')
arrayDiskCapableSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskCapableSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskCapableSpeed.setDescription('The maximum speed at which the drive is capable of negotiating in MPS (megabytes per second). ')
arrayDiskSmartAlertIndication = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSmartAlertIndication.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskSmartAlertIndication.setDescription('Indicates whether the disk has received a predictive failure. Possible values: 1: No - disk has not received a predictive failure alert 2: Yes - disk has received a predictive failure alert')
arrayDiskManufactureDay = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 32), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskManufactureDay.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskManufactureDay.setDescription('The day of the week (1=Sunday thru 7=Saturday) on which this disk was manufactured.')
arrayDiskManufactureWeek = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 33), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskManufactureWeek.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskManufactureWeek.setDescription('The week (1 thru 53) in which this disk was manufactured.')
arrayDiskManufactureYear = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 34), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskManufactureYear.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskManufactureYear.setDescription('The four digit year in which this disk was manufactured.')
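# --- Illustrative helper (not generated from the MIB) -------------------------
# The three manufacture fields above (four-digit year, week 1-53, and day with
# 1=Sunday..7=Saturday) can be collapsed into an approximate calendar date. The
# MIB does not define the week-numbering convention, so "week N starts on day
# (N-1)*7 of the year" is assumed here; treat the result as approximate.
def _approxManufactureDate(year, week, day):
    import datetime
    jan1 = datetime.date(int(year), 1, 1)
    return jan1 + datetime.timedelta(days=(int(week) - 1) * 7 + (int(day) - 1))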
arrayDiskMediaType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("hdd", 2), ("ssd", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskMediaType.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskMediaType.setDescription('The Media type of the array disk. Possible Values: 1:unknown 2:hdd 3:ssd')
arrayDiskDellCertified = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 36), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskDellCertified.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskDellCertified.setDescription('Indicates if array disk is certified by Dell. Value: 1 - Certified, 0 - Not Certified, 99 - Unknown')
arrayDiskAltaVendorId = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 37), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskAltaVendorId.setStatus('obsolete')
if mibBuilder.loadTexts: arrayDiskAltaVendorId.setDescription('Provides vendor information for Alta interposer.')
arrayDiskAltaProductId = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 38), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskAltaProductId.setStatus('obsolete')
if mibBuilder.loadTexts: arrayDiskAltaProductId.setDescription('Provides product id for Alta interposer.')
arrayDiskAltaRevisionId = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 39), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskAltaRevisionId.setStatus('obsolete')
if mibBuilder.loadTexts: arrayDiskAltaRevisionId.setDescription('Provides revision id for Alta interposer.')
arrayDiskEncryptionCapable = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 40), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEncryptionCapable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEncryptionCapable.setDescription('Indicates if physical disk is Encryption capable. Value: 1 - Capable, 0 - Not Capable, 99 - Not Applicable')
arrayDiskEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 41), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEncrypted.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEncrypted.setDescription('Indicates if the physical disk has encryption enabled. Value: 1 - Yes, 0 - No, 99 - Not Applicable')
arrayDiskPowerState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 42), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskPowerState.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskPowerState.setDescription('Indicates power state of a physical drive. Value: 0 - Spun up, 1 - Spun down, 255 - Transition, 99 - Not Applicable')
arrayDiskDriveWriteCache = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 43), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskDriveWriteCache.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskDriveWriteCache.setDescription('Indicates drive write cache capability for PCIe SSD devices. Value: 1 - Enabled, 0 - Disabled, 99 - Undetermined/Not Applicable')
arrayDiskModelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 44), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskModelNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskModelNumber.setDescription('Provides PCIe SSD device model number.')
arrayDiskLifeRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 45), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLifeRemaining.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLifeRemaining.setDescription('Provides PCIe SSD device life remaining in percentage. Value: 0..100, 999 - Undetermined/Not Applicable ')
arrayDiskDriverVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 46), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskDriverVersion.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskDriverVersion.setDescription('Provides PCIe SSD device driver version.')
arrayDiskDeviceLifeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 47), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskDeviceLifeStatus.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskDeviceLifeStatus.setDescription('Provides PCIe SSD device life status. Possible Values: -1: Not Available / Not Applicable 1: Drive Health Good 2: Approaching Warranty Coverage Expiry 3: Warranty Coverage Expired 4: Approaching Read Only 5: Read Only')
arrayDiskReadOnly = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 48), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskReadOnly.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskReadOnly.setDescription("Provides PCIe SSD's Read Only attribute. Possible Values: Yes, No, Not Applicable ")
arrayDiskRemainingRatedWriteEndurance = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 49), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskRemainingRatedWriteEndurance.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskRemainingRatedWriteEndurance.setDescription('Provides PCIe SSD device Remaining Rated Write Endurance. Possible Values: 0-100%, Not Available/Not Applicable ')
arrayDiskSectorSize = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 50), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskSectorSize.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskSectorSize.setDescription('Provides the information regarding sector size of array disk. Possible values are: 512 or 4096.')
arrayDiskPICapable = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 51), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskPICapable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskPICapable.setDescription('Provides the information regarding T10 PI capability of Array disk. Possible values are: 0 (T10 PI incapable) or 1 (T10 PI capable).')
arrayDiskMaxLinkWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 52), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskMaxLinkWidth.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskMaxLinkWidth.setDescription('Provides the information regarding Max Link Width of Array disk. Possible values are: 0 - 8.')
arrayDiskNegotiatedLinkWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 4, 1, 53), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskNegotiatedLinkWidth.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskNegotiatedLinkWidth.setDescription('Provides the information regarding Negotiated Link Width of Array disk. Possible values are: 0 - 8.')
arrayDiskEnclosureConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5), )
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionTable.setDescription('A table of the connections between array disks, their enclosure, and their associated controller. For each object in the table, its object number corresponds to an instance number in the appropriate MIB table for that object where all of the object properties can be found. Note: Only array disks that are part of an enclosure will be listed in this table. Backplanes are considered enclosures by Storage Management.')
arrayDiskEnclosureConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1), ).setIndexNames((0, "StorageManagement-MIB", "arrayDiskEnclosureConnectionNumber"))
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEntry.setDescription('An entry in the Array Disk Enclosure Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
arrayDiskEnclosureConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionNumber.setDescription('Instance number of this array disk connection entry. This table shows the array disk to enclosure relationship.')
arrayDiskEnclosureConnectionArrayDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionArrayDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionArrayDiskName.setDescription('The name of the array disk in this connection as represented in Storage Management.')
arrayDiskEnclosureConnectionArrayDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionArrayDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionArrayDiskNumber.setDescription('The instance number of the array disk in the arrayDiskTable in this connection.')
arrayDiskEnclosureConnectionEnclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEnclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEnclosureName.setDescription('The name of the enclosure as represented in Storage Management to which this array disk belongs. ')
arrayDiskEnclosureConnectionEnclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEnclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionEnclosureNumber.setDescription('The instance number in the enclosureTable of the enclosure to which this array disk belongs.')
arrayDiskEnclosureConnectionControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionControllerName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionControllerName.setDescription('The name of the controller as represented in Storage Management to which this array disk is connected.')
arrayDiskEnclosureConnectionControllerNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 5, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionControllerNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskEnclosureConnectionControllerNumber.setDescription('The instance number in the controllerTable of the controller to which this array disk is connected.')
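# --- Illustrative helper (not generated from the MIB) -------------------------
# Each *Number column in the connection table above is an instance number in
# another table, so resolving a connection row is a pair of dictionary lookups.
# The rows-by-number dictionaries are assumptions standing in for table data
# already fetched from an agent.
def _resolveEnclosureConnection(connRow, disksByNumber, enclosuresByNumber):
    disk = disksByNumber[connRow['arrayDiskEnclosureConnectionArrayDiskNumber']]
    enclosure = enclosuresByNumber[connRow['arrayDiskEnclosureConnectionEnclosureNumber']]
    return disk, enclosure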
arrayDiskChannelConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6), )
if mibBuilder.loadTexts: arrayDiskChannelConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionTable.setDescription('A table of the connections between array disks, their channel, and their associated controller. For each object in the table, its object number corresponds to an instance number in the appropriate MIB table for that object where all of the object properties can be found. Note: Only array disks that are NOT part of an enclosure will be listed in this table. Backplanes are considered enclosures by Storage Management.')
arrayDiskChannelConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1), ).setIndexNames((0, "StorageManagement-MIB", "arrayDiskChannelConnectionNumber"))
if mibBuilder.loadTexts: arrayDiskChannelConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionEntry.setDescription('An entry in the Array Disk Channel Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
arrayDiskChannelConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionNumber.setDescription('Instance number of this array disk connection entry. This table shows the array disk to SCSI channel relationship. There is no enclosure service processor associated with these array disks.')
arrayDiskChannelConnectionArrayDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionArrayDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionArrayDiskName.setDescription('The name of the array disk in this connection as represented in Storage Management.')
arrayDiskChannelConnectionArrayDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionArrayDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionArrayDiskNumber.setDescription('The instance number of the array disk in the arrayDiskTable in this connection.')
arrayDiskChannelConnectionChannelName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionChannelName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionChannelName.setDescription('The name of the channel as represented in Storage Management to which this array disk is connected.')
arrayDiskChannelConnectionChannelNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionChannelNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionChannelNumber.setDescription('The instance number of the channel in the channelTable to which this array disk is connected.')
arrayDiskChannelConnectionControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionControllerName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionControllerName.setDescription('The name of the controller as represented in Storage Management to which this array disk is connected.')
arrayDiskChannelConnectionControllerNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 6, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskChannelConnectionControllerNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskChannelConnectionControllerNumber.setDescription('The instance number in the controllerTable of the controller to which this array disk is connected.')
fanTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7), )
if mibBuilder.loadTexts: fanTable.setStatus('mandatory')
if mibBuilder.loadTexts: fanTable.setDescription('A table of managed Fans. The number of entries is related to the number of Fans discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
fanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1), ).setIndexNames((0, "StorageManagement-MIB", "fanNumber"))
if mibBuilder.loadTexts: fanEntry.setStatus('mandatory')
if mibBuilder.loadTexts: fanEntry.setDescription('An entry in the Fan table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
fanNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fanNumber.setDescription('Instance number of this fan entry.')
fanName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanName.setStatus('mandatory')
if mibBuilder.loadTexts: fanName.setDescription("The fan's name as represented in Storage Management.")
fanVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanVendor.setStatus('mandatory')
if mibBuilder.loadTexts: fanVendor.setDescription("The fan's (re)seller's name.")
fanState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 6, 11, 21))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("degraded", 6), ("removed", 11), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanState.setStatus('mandatory')
if mibBuilder.loadTexts: fanState.setDescription('The current condition of the fan. Possible states: 0: Unknown 1: Ready 2: Failed 6: Degraded 11: Removed 21: Missing')
fanSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: fanSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
fanProbeUnit = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeUnit.setStatus('obsolete')
if mibBuilder.loadTexts: fanProbeUnit.setDescription('This entry is obsolete for Storage Management.')
fanProbeMinWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeMinWarning.setStatus('obsolete')
if mibBuilder.loadTexts: fanProbeMinWarning.setDescription('This entry is obsolete. This setting is not supported by fans managed under Storage Management.')
fanProbeMinCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeMinCritical.setStatus('obsolete')
if mibBuilder.loadTexts: fanProbeMinCritical.setDescription('This entry is obsolete. This setting is not supported by fans managed under Storage Management.')
fanProbeMaxWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeMaxWarning.setStatus('obsolete')
if mibBuilder.loadTexts: fanProbeMaxWarning.setDescription('This entry is obsolete. This setting is not supported by fans managed under Storage Management.')
fanProbeMaxCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeMaxCritical.setStatus('obsolete')
if mibBuilder.loadTexts: fanProbeMaxCritical.setDescription('This entry is obsolete. This setting is not supported by fans managed under Storage Management.')
fanProbeCurrValue = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanProbeCurrValue.setStatus('mandatory')
if mibBuilder.loadTexts: fanProbeCurrValue.setDescription('The current speed of the fan. ')
fan1PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fan1PartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fan1PartNumber.setDescription('The part number of the fan in the enclosure.')
fan2PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fan2PartNumber.setStatus('obsolete')
if mibBuilder.loadTexts: fan2PartNumber.setDescription('This entry is obsolete. This setting is not supported by fans managed under Storage Management.')
fanRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 14), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: fanRollUpStatus.setDescription('Severity of the fan state. This is the combined status of the fan and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
fanComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 15), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: fanComponentStatus.setDescription('The status of the fan itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
fanNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 16), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: fanNexusID.setDescription('Durable unique ID for this fan.')
fanRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 7, 1, 17), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanRevision.setStatus('mandatory')
if mibBuilder.loadTexts: fanRevision.setDescription('The revision number of the fan in the enclosure.')
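# Usage sketch, not part of the generated MIB definitions: a minimal example
# of walking the fan table above with the pysnmp high-level API.  The host
# name and community string are placeholder assumptions; numeric OIDs are
# used so that no compiled-MIB lookup is needed.
def _exampleWalkFanSpeeds(host='storage-node.example', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for errorIndication, errorStatus, errorIndex, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('1.3.6.1.4.1.674.10893.1.20.130.7.1.2')),   # fanName
            ObjectType(ObjectIdentity('1.3.6.1.4.1.674.10893.1.20.130.7.1.11')),  # fanProbeCurrValue
            lexicographicMode=False):
        if errorIndication or errorStatus:
            break  # stop on any SNMP error instead of yielding partial rows
        name, speed = (value.prettyPrint() for _, value in varBinds)
        yield name, speed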
fanConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8), )
if mibBuilder.loadTexts: fanConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionTable.setDescription('A table of the connections between each fan on the managed node and its enclosure. Each enclosure number in the table corresponds to that enclosure instance in the Enclosure Table.')
fanConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1), ).setIndexNames((0, "StorageManagement-MIB", "fanConnectionNumber"))
if mibBuilder.loadTexts: fanConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionEntry.setDescription('An entry in the Fan Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
fanConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionNumber.setDescription('Instance number of this fan connection entry.')
fanConnectionFanName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanConnectionFanName.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionFanName.setDescription('The name of the fan in this connection as represented in Storage Management.')
fanConnectionFanNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanConnectionFanNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionFanNumber.setDescription('The instance number of the fan in the fanTable in this connection.')
fanConnectionEnclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanConnectionEnclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionEnclosureName.setDescription('The name of the enclosure as represented in Storage Management to which this fan belongs.')
fanConnectionEnclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 8, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fanConnectionEnclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fanConnectionEnclosureNumber.setDescription('The instance number of the enclosure in the enclosureTable to which this fan belongs.')
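# Usage sketch, not part of the generated MIB definitions: the connection
# tables in this module (fan, power supply, temperature probe, enclosure
# management module, battery) all encode the same join, pairing a component
# instance number with its enclosure (or controller) instance number.  A
# hypothetical helper, assuming rows were already fetched into dicts keyed
# by column name:
def _exampleFanToEnclosure(connectionRows):
    return dict((row['fanConnectionFanNumber'], row['fanConnectionEnclosureNumber'])
                for row in connectionRows)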
powerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9), )
if mibBuilder.loadTexts: powerSupplyTable.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyTable.setDescription('A table of managed Power Supplies. The number of entries is related to number of Power Supplies discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
powerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1), ).setIndexNames((0, "StorageManagement-MIB", "powerSupplyNumber"))
if mibBuilder.loadTexts: powerSupplyEntry.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyEntry.setDescription('An entry in the Power Supply table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
powerSupplyNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyNumber.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyNumber.setDescription('Instance number of this power supply entry.')
powerSupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyName.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyName.setDescription("The power supply's name as represented in Storage Management.")
powerSupplyVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyVendor.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyVendor.setDescription("The power supply's (re)seller's name.")
powerSupplyState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 5, 6, 11, 21))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("notInstalled", 5), ("degraded", 6), ("removed", 11), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyState.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyState.setDescription('The current condition of the power supply. Possible states: 0: Unknown 1: Ready 2: Failed 5: Not Installed 6: Degraded 11: Removed 21: Missing')
powerSupplySeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplySeverity.setStatus('obsolete')
if mibBuilder.loadTexts: powerSupplySeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
powerSupply1PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupply1PartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupply1PartNumber.setDescription('The part number of the power supply of the enclosure.')
powerSupply2PartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupply2PartNumber.setStatus('obsolete')
if mibBuilder.loadTexts: powerSupply2PartNumber.setDescription('This entry is obsolete. This setting is not supported by power supplies managed under Storage Management.')
powerSupplyRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 8), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyRollUpStatus.setDescription('Severity of the power supply state. This is the combined status of the power supply and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
powerSupplyComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 9), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyComponentStatus.setDescription('The status of the power supply itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
powerSupplyNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyNexusID.setDescription('Durable unique ID for this power supply.')
powerSupplyRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 9, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyRevision.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyRevision.setDescription('The revision number of the power supply in the enclosure.')
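# Usage sketch, not part of the generated MIB definitions: mapping
# powerSupplyState codes to the labels given in its description.  Note that
# 0 (Unknown) appears in the description but not in the named values above.
def _examplePowerSupplyStateLabel(state):
    return {0: 'Unknown', 1: 'Ready', 2: 'Failed', 5: 'Not Installed',
            6: 'Degraded', 11: 'Removed', 21: 'Missing'}.get(int(state), 'Unrecognized')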
powerSupplyConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10), )
if mibBuilder.loadTexts: powerSupplyConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionTable.setDescription('A table of the connections between each power supply on the managed node and its enclosure. Each enclosure number in the table corresponds to that enclosure instance in the Enclosure Table.')
powerSupplyConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1), ).setIndexNames((0, "StorageManagement-MIB", "powerSupplyConnectionNumber"))
if mibBuilder.loadTexts: powerSupplyConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionEntry.setDescription('An entry in the Power Supply Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
powerSupplyConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionNumber.setDescription('Instance number of this power supply connection entry.')
powerSupplyConnectionPowersupplyName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionPowersupplyName.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionPowersupplyName.setDescription('The name of the power supply in this connection as represented in Storage Management.')
powerSupplyConnectionPowersupplyNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionPowersupplyNumber.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionPowersupplyNumber.setDescription('The instance number in the powerSupplyTable of the power supply in this connection.')
powerSupplyConnectionEnclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionEnclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionEnclosureName.setDescription('The name of the enclosure to which this power supply is connected as represented in Storage Management.')
powerSupplyConnectionEnclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionEnclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionEnclosureNumber.setDescription('The instance number of the enclosure in the enclosureTable to which this power supply is connected.')
powerSupplyConnectionFirmwareVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 10, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: powerSupplyConnectionFirmwareVersion.setStatus('mandatory')
if mibBuilder.loadTexts: powerSupplyConnectionFirmwareVersion.setDescription('Displays the power supply connection firmware version. Note: available only with firmware versions above 1.04.')
temperatureProbeTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11), )
if mibBuilder.loadTexts: temperatureProbeTable.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeTable.setDescription('A table of managed Temperature Probes. The number of entries is related to number of Temperature Probes discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
temperatureProbeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1), ).setIndexNames((0, "StorageManagement-MIB", "temperatureProbeNumber"))
if mibBuilder.loadTexts: temperatureProbeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeEntry.setDescription('An entry in the Temperature Probe table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
temperatureProbeNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeNumber.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeNumber.setDescription('Instance number of this temperature probe entry.')
temperatureProbeName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeName.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeName.setDescription('Name of this temperature probe as represented in Storage Management.')
temperatureProbeVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeVendor.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeVendor.setDescription("Temperature probe's (re)seller's name.")
temperatureProbeState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 6, 9, 21))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("offline", 4), ("degraded", 6), ("inactive", 9), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeState.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeState.setDescription('The current condition of the temperature probe. Possible states: 0: Unknown 1: Ready 2: Failed (Minimum Failure Threshold Exceeded, Maximum Failure Threshold Exceeded) 4: Offline 6: Degraded (Minimum Warning Threshold Exceeded, Maximum Warning Threshold Exceeded) 9: Inactive 21: Missing')
temperatureProbeSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: temperatureProbeSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
temperatureProbeUnit = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeUnit.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeUnit.setDescription('The units that will be used to display temperatures for this temperature probe.')
temperatureProbeMinWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeMinWarning.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeMinWarning.setDescription('The minimum temperature that will force the probe into a warning state.')
temperatureProbeMinCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeMinCritical.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeMinCritical.setDescription('The minimum temperature that will force the probe into an error state.')
temperatureProbeMaxWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeMaxWarning.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeMaxWarning.setDescription('The maximum temperature that will force the probe into a warning state.')
temperatureProbeMaxCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeMaxCritical.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeMaxCritical.setDescription('The maximum temperature that will force the probe into an error state.')
temperatureProbeCurValue = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeCurValue.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeCurValue.setDescription('The current temperature of this probe.')
temperatureProbeRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 12), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeRollUpStatus.setDescription('Severity of the temperature probe state. This is the combined status of the temperature probe and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
temperatureProbeComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 13), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeComponentStatus.setDescription('The status of the temperature probe itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
temperatureProbeNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 11, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureProbeNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureProbeNexusID.setDescription('Durable unique ID for this temperature probe.')
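# Usage sketch, not part of the generated MIB definitions: classifying
# temperatureProbeCurValue against the four threshold columns above.  Assumes
# all five values come from the same row (and so share temperatureProbeUnit);
# mirrors the Failed/Degraded semantics in the temperatureProbeState
# description.
def _exampleTemperatureSeverity(value, minWarn, minCrit, maxWarn, maxCrit):
    if value <= minCrit or value >= maxCrit:
        return 'critical'   # failure threshold exceeded
    if value <= minWarn or value >= maxWarn:
        return 'warning'    # warning threshold exceeded
    return 'ok'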
temperatureConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12), )
if mibBuilder.loadTexts: temperatureConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionTable.setDescription('A table of the connections between each temperature probe on the managed node and its enclosure. Each enclosure number in the table corresponds to that enclosure instance in the Enclosure Table.')
temperatureConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1), ).setIndexNames((0, "StorageManagement-MIB", "temperatureConnectionNumber"))
if mibBuilder.loadTexts: temperatureConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionEntry.setDescription('An entry in the Temperature Probe Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
temperatureConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionNumber.setDescription('Instance number of this temperature probe connection entry.')
temperatureConnectionTemperatureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureConnectionTemperatureName.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionTemperatureName.setDescription('The name of the temperature probe in this connection as represented in Storage Management.')
temperatureConnectionTemperatureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureConnectionTemperatureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionTemperatureNumber.setDescription('The instance number in the temperatureProbeTable of the temperature probe in this connection.')
temperatureConnectionEnclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureConnectionEnclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionEnclosureName.setDescription('The name of the enclosure as represented in Storage Management to which this temperature probe belongs.')
temperatureConnectionEnclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 12, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: temperatureConnectionEnclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: temperatureConnectionEnclosureNumber.setDescription('The instance number of the enclosure in the enclosureTable to which this temperature probe belongs. ')
enclosureManagementModuleTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13), )
if mibBuilder.loadTexts: enclosureManagementModuleTable.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleTable.setDescription('A table of managed Enclosure Management Modules. The number of entries is related to number of Enclosure Management Modules discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
enclosureManagementModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1), ).setIndexNames((0, "StorageManagement-MIB", "enclosureManagementModuleNumber"))
if mibBuilder.loadTexts: enclosureManagementModuleEntry.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleEntry.setDescription('An entry in the Enclosure Management Module table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
enclosureManagementModuleNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleNumber.setDescription('Instance number of this enclosure management module entry.')
enclosureManagementModuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleName.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleName.setDescription('Name of this enclosure management module as represented in Storage Management.')
enclosureManagementModuleVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleVendor.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleVendor.setDescription("The management module's (re)seller's name.")
enclosureManagementModuleState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 21))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("notInstalled", 5), ("degraded", 6), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleState.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleState.setDescription('The current condition of the enclosure management module. Possible states: 0: Unknown 1: Ready 2: Failed 3: Online 4: Offline 5: Not Installed 6: Degraded 21: Missing')
enclosureManagementModuleSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: enclosureManagementModuleSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
enclosureManagementModulePartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModulePartNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModulePartNumber.setDescription('The part number of the enclosure management module.')
enclosureManagementModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("eMM", 1), ("terminationCard", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleType.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleType.setDescription('The type of the enclosure management module. Possible values: 0: Unknown 1: EMM 2: Termination Card')
enclosureManagementModuleFWVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleFWVersion.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleFWVersion.setDescription('Firmware version of the enclosure management module.')
enclosureManagementModuleMaxSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleMaxSpeed.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleMaxSpeed.setDescription('The maximum bus speed of the enclosure management module.')
enclosureManagementModuleRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 10), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleRollUpStatus.setDescription('Severity of the enclosure management module state. This is the combined status of the EMM and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
enclosureManagementModuleComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 11), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleComponentStatus.setDescription('The status of the enclosure management module itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
enclosureManagementModuleNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleNexusID.setDescription('Durable unique ID for this EMM.')
enclosureManagementModuleRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 13, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleRevision.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleRevision.setDescription('The revision number of the enclosure management module.')
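# Usage sketch, not part of the generated MIB definitions: the RollUpStatus
# and ComponentStatus columns throughout this module share the DellStatus
# code space listed in their descriptions, so one label helper covers them all.
def _exampleDellStatusLabel(status):
    return {1: 'Other', 2: 'Unknown', 3: 'OK', 4: 'Non-critical',
            5: 'Critical', 6: 'Non-recoverable'}.get(int(status), 'Unrecognized')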
enclosureManagementModuleConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14), )
if mibBuilder.loadTexts: enclosureManagementModuleConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionTable.setDescription('A table of the connections between each enclosure management module on the managed node and its enclosure. Each enclosure number in the table corresponds to that enclosure instance in the Enclosure Table.')
enclosureManagementModuleConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1), ).setIndexNames((0, "StorageManagement-MIB", "enclosureManagementModuleConnectionNumber"))
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEntry.setDescription('An entry in the Enclosure Management Module Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
enclosureManagementModuleConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionNumber.setDescription('Instance number of this enclosure management module connection entry.')
enclosureManagementModuleConnectionEMMName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEMMName.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEMMName.setDescription('The name of the enclosure management module in this connection as represented in Storage Management.')
enclosureManagementModuleConnectionEMMNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEMMNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEMMNumber.setDescription('The instance number of the enclosure management module in the enclosureManagementModuleTable in this connection.')
enclosureManagementModuleConnectionEnclosureName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEnclosureName.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEnclosureName.setDescription('The name of the enclosure as represented in Storage Management to which this enclosure management module belongs.')
enclosureManagementModuleConnectionEnclosureNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 14, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEnclosureNumber.setStatus('mandatory')
if mibBuilder.loadTexts: enclosureManagementModuleConnectionEnclosureNumber.setDescription('The instance number of the enclosure in the enclosureTable to which this enclosure management module belongs.')
batteryTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15), )
if mibBuilder.loadTexts: batteryTable.setStatus('mandatory')
if mibBuilder.loadTexts: batteryTable.setDescription('A table of managed Batteries. The number of entries is related to number of Batteries discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
batteryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1), ).setIndexNames((0, "StorageManagement-MIB", "batteryNumber"))
if mibBuilder.loadTexts: batteryEntry.setStatus('mandatory')
if mibBuilder.loadTexts: batteryEntry.setDescription('An entry in the Battery table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
batteryNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryNumber.setStatus('mandatory')
if mibBuilder.loadTexts: batteryNumber.setDescription('Instance number of this battery entry.')
batteryName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryName.setStatus('mandatory')
if mibBuilder.loadTexts: batteryName.setDescription("The battery's name as represented in Storage Management.")
batteryVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryVendor.setStatus('mandatory')
if mibBuilder.loadTexts: batteryVendor.setDescription("The battery's (re)seller's name.")
batteryState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 6, 7, 9, 10, 12, 21, 36))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("degraded", 6), ("reconditioning", 7), ("high", 9), ("low", 10), ("charging", 12), ("missing", 21), ("learning", 36)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryState.setStatus('mandatory')
if mibBuilder.loadTexts: batteryState.setDescription('Current state of the battery. Possible values: 0: Unknown 1: Ready 2: Failed 6: Degraded 7: Reconditioning 9: High 10: Low Power 12: Charging 21: Missing 36: Learning')
batteryRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 5), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: batteryRollUpStatus.setDescription('Severity of the battery state. This is the combined status of the battery and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
batteryComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 6), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: batteryComponentStatus.setDescription('The status of the battery itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
batteryChargeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryChargeCount.setStatus('mandatory')
if mibBuilder.loadTexts: batteryChargeCount.setDescription('The number of charges that have been applied to the battery.')
batteryMaxChargeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryMaxChargeCount.setStatus('mandatory')
if mibBuilder.loadTexts: batteryMaxChargeCount.setDescription('The maximum number of charges that can be applied to the battery.')
batteryNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: batteryNexusID.setDescription('Durable unique ID for this battery.')
batteryPredictedCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4))).clone(namedValues=NamedValues(("failed", 1), ("ready", 2), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryPredictedCapacity.setStatus('obsolete')
if mibBuilder.loadTexts: batteryPredictedCapacity.setDescription("This property displays the battery's ability to be charged. Possible values: 1: Failed - The battery cannot be charged and needs to be replaced. 2: Ready - The battery can be charged to full capacity. 4: Unknown - The battery is completing a Learn cycle. The charge capacity of the battery cannot be determined until the Learn cycle is complete. ")
batteryNextLearnTime = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryNextLearnTime.setStatus('deprecated')
if mibBuilder.loadTexts: batteryNextLearnTime.setDescription('Time (in hours) until the next learn cycle must be executed.')
batteryLearnState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8, 16, 32))).clone(namedValues=NamedValues(("failed", 1), ("active", 2), ("timedOut", 4), ("requested", 8), ("idle", 16), ("due", 32)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryLearnState.setStatus('deprecated')
if mibBuilder.loadTexts: batteryLearnState.setDescription('Specifies the learn state activity of the battery. Possible values: 1: Failed 2: Active 4: Timed out 8: Requested 16: Idle 32: Due')
batteryID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryID.setStatus('mandatory')
if mibBuilder.loadTexts: batteryID.setDescription('Represents the unique ID for this battery.')
batteryMaxLearnDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryMaxLearnDelay.setStatus('deprecated')
if mibBuilder.loadTexts: batteryMaxLearnDelay.setDescription('The maximum amount of time (in hours) that the battery learn cycle can be delayed.')
batteryLearnMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 15, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8))).clone(namedValues=NamedValues(("auto", 1), ("warn", 2), ("autowarn", 4), ("unknown", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: batteryLearnMode.setStatus('deprecated')
if mibBuilder.loadTexts: batteryLearnMode.setDescription('Indicates the current learn mode of the battery. Possible values: 1: Auto 2: Warn 4: Autowarn 8: Unknown')
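# Usage sketch, not part of the generated MIB definitions: expressing
# batteryChargeCount as a percentage of batteryMaxChargeCount, guarding
# against an unreported (zero) maximum.
def _exampleBatteryWearPercent(chargeCount, maxChargeCount):
    if not maxChargeCount:
        return None  # maximum unknown; wear cannot be estimated
    return 100.0 * int(chargeCount) / int(maxChargeCount)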
batteryConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16), )
if mibBuilder.loadTexts: batteryConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionTable.setDescription('A table of the connections between each battery on the managed node and its controller. Each controller number in the table corresponds to that controller instance in the Controller Table.')
batteryConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1), ).setIndexNames((0, "StorageManagement-MIB", "batteryConnectionNumber"))
if mibBuilder.loadTexts: batteryConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionEntry.setDescription('An entry in the Battery Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
batteryConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionNumber.setDescription('Instance number of this battery connection entry.')
batteryConnectionBatteryName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryConnectionBatteryName.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionBatteryName.setDescription('The name of the battery in this connection as represented in Storage Management.')
batteryConnectionBatteryNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryConnectionBatteryNumber.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionBatteryNumber.setDescription('The instance number in the batteryTable of the battery in this connection.')
batteryConnectionControllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryConnectionControllerName.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionControllerName.setDescription('The name of the controller as represented in Storage Management to which this battery belongs.')
batteryConnectionControllerNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 16, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: batteryConnectionControllerNumber.setStatus('mandatory')
if mibBuilder.loadTexts: batteryConnectionControllerNumber.setDescription('The instance number of the controller in the controllerTable to which this battery belongs.')
controllerTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1), )
if mibBuilder.loadTexts: controllerTable.setStatus('mandatory')
if mibBuilder.loadTexts: controllerTable.setDescription('A table of managed RAID controllers. The number of entries is related to number of RAID controllers discovered in the system. Note: The properties in this table may not be applicable to all entries.')
controllerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1), ).setIndexNames((0, "StorageManagement-MIB", "controllerNumber"))
if mibBuilder.loadTexts: controllerEntry.setStatus('mandatory')
if mibBuilder.loadTexts: controllerEntry.setDescription('An entry in the table of RAID controllers. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
controllerNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerNumber.setStatus('mandatory')
if mibBuilder.loadTexts: controllerNumber.setDescription('Instance number of this controller entry.')
controllerName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerName.setStatus('mandatory')
if mibBuilder.loadTexts: controllerName.setDescription('The name of the controller in this subsystem as represented in Storage Management. Includes the controller type and instance. For example: Perc3/QC 1.')
controllerVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerVendor.setStatus('mandatory')
if mibBuilder.loadTexts: controllerVendor.setDescription("The controller's (re)seller's name.")
controllerType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 9))).clone(namedValues=NamedValues(("scsi", 1), ("pv660F", 2), ("pv662F", 3), ("ide", 4), ("sata", 5), ("sas", 6), ("pciessd", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerType.setStatus('mandatory')
if mibBuilder.loadTexts: controllerType.setDescription('The type of this controller: 1: SCSI 2: PV660F 3: PV662F 4: IDE (Integrated/Intelligent Drive Electronics) 5: SATA (Serial Advanced Technology Attachment) 6: SAS (Serial Attached SCSI) 9: PCIe SSD')
controllerState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("degraded", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerState.setStatus('mandatory')
if mibBuilder.loadTexts: controllerState.setDescription("The current condition of the controller's subsystem (which includes any devices connected to it). Possible states: 0: Unknown 1: Ready 2: Failed 3: Online 4: Offline 6: Degraded")
controllerSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: controllerSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
controllerRebuildRateInPercent = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerRebuildRateInPercent.setStatus('mandatory')
if mibBuilder.loadTexts: controllerRebuildRateInPercent.setDescription('The percent of the compute cycles dedicated to rebuilding failed array disks. ')
controllerFWVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerFWVersion.setStatus('mandatory')
if mibBuilder.loadTexts: controllerFWVersion.setDescription("The controller's current firmware version.")
controllerCacheSizeInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerCacheSizeInMB.setStatus('mandatory')
if mibBuilder.loadTexts: controllerCacheSizeInMB.setDescription("The controller's current amount of cache memory in megabytes. If this size is 0, it is less than a megabyte.")
controllerCacheSizeInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerCacheSizeInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: controllerCacheSizeInBytes.setDescription("The controller's current amount of cache memory that is less than a megabyte. This combined with the controllerCacheSizeInMB will be the total amount of memory.")
controllerPhysicalDeviceCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPhysicalDeviceCount.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPhysicalDeviceCount.setDescription('Number of physical devices on the controller channel including both disks and the controller.')
controllerLogicalDeviceCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerLogicalDeviceCount.setStatus('mandatory')
if mibBuilder.loadTexts: controllerLogicalDeviceCount.setDescription('Number of virtual disks on the controller.')
controllerPartnerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPartnerStatus.setStatus('obsolete')
if mibBuilder.loadTexts: controllerPartnerStatus.setDescription('This entry is obsolete for Storage Management.')
controllerHostPortCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerHostPortCount.setStatus('obsolete')
if mibBuilder.loadTexts: controllerHostPortCount.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerMemorySizeInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerMemorySizeInMB.setStatus('mandatory')
if mibBuilder.loadTexts: controllerMemorySizeInMB.setDescription('Size of memory in megabytes on the controller. If this size is 0, it is less than a megabyte. This attribute is only supported on Adaptec controllers. ')
controllerMemorySizeInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerMemorySizeInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: controllerMemorySizeInBytes.setDescription('Size of memory on the controller that is less than a megabyte. This combined with the controllerMemorySizeInMB will be the total size of the memory. This attribute is only supported on Adaptec controllers.')
controllerDriveChannelCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerDriveChannelCount.setStatus('obsolete')
if mibBuilder.loadTexts: controllerDriveChannelCount.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerFaultTolerant = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerFaultTolerant.setStatus('mandatory')
if mibBuilder.loadTexts: controllerFaultTolerant.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0Port0WWN = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 19), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Port0WWN.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Port0WWN.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0Port0Name = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 20), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Port0Name.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Port0Name.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0Port0ID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Port0ID.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Port0ID.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0Target = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Target.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Target.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management. ')
controllerC0Channel = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Channel.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Channel.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0OSController = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 24), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0OSController.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0OSController.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0BatteryState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 7, 9, 10, 12, 21))).clone(namedValues=NamedValues(("ok", 1), ("failed", 2), ("reconditioning", 7), ("high", 9), ("low", 10), ("charging", 12), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0BatteryState.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0BatteryState.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management. ')
controllerC1Port0WWN = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 26), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Port0WWN.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Port0WWN.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1Port0Name = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 27), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Port0Name.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Port0Name.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1Port0ID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Port0ID.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Port0ID.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1Target = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Target.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Target.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1Channel = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Channel.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Channel.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1OSController = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 31), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1OSController.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1OSController.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1BatteryState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 7, 9, 10, 12, 21))).clone(namedValues=NamedValues(("ok", 1), ("failed", 2), ("reconditioning", 7), ("high", 9), ("low", 10), ("charging", 12), ("missing", 21)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1BatteryState.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1BatteryState.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management. ')
controllerNodeWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 33), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerNodeWWN.setStatus('obsolete')
if mibBuilder.loadTexts: controllerNodeWWN.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC0Port1WWN = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 34), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC0Port1WWN.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC0Port1WWN.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerC1Port1WWN = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 35), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerC1Port1WWN.setStatus('obsolete')
if mibBuilder.loadTexts: controllerC1Port1WWN.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerBatteryChargeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 36), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerBatteryChargeCount.setStatus('obsolete')
if mibBuilder.loadTexts: controllerBatteryChargeCount.setDescription('This entry is obsolete. Fibre channel is not supported in Storage Management.')
controllerRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 37), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: controllerRollUpStatus.setDescription('Severity of the controller state. This is the combined status of the controller and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
controllerComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 38), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: controllerComponentStatus.setDescription('The status of the controller itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
controllerNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 39), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: controllerNexusID.setDescription('Durable unique ID for this controller.')
controllerAlarmState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("not-applicable", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerAlarmState.setStatus('mandatory')
if mibBuilder.loadTexts: controllerAlarmState.setDescription("State, or setting for the controller's alarm. Possible values: 1: Enabled 2: Disabled 3: Not Applicable")
controllerDriverVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 41), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerDriverVersion.setStatus('mandatory')
if mibBuilder.loadTexts: controllerDriverVersion.setDescription('Currently installed driver version for this controller.')
controllerPCISlot = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 42), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPCISlot.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPCISlot.setDescription('The PCI slot number or embedded number for controllers on the motherboard. ')
controllerClusterMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 43), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 99))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("active", 3), ("notApplicable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerClusterMode.setStatus('mandatory')
if mibBuilder.loadTexts: controllerClusterMode.setDescription('Identifies if the controller is in cluster mode. Possible values: 1 : Enabled 2 : Disabled 3 : Active (enabled and active) 99: Not Applicable ')
controllerMinFWVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 44), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerMinFWVersion.setStatus('mandatory')
if mibBuilder.loadTexts: controllerMinFWVersion.setDescription('The minimum firmware version for Storage Management to support the controller. ')
controllerMinDriverVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 45), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerMinDriverVersion.setStatus('mandatory')
if mibBuilder.loadTexts: controllerMinDriverVersion.setDescription('The minimum driver version for Storage Management to support the controller. ')
controllerSCSIInitiatorID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 46), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerSCSIInitiatorID.setStatus('mandatory')
if mibBuilder.loadTexts: controllerSCSIInitiatorID.setDescription('The SCSI ID of the initiator.')
controllerChannelCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 47), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerChannelCount.setStatus('mandatory')
if mibBuilder.loadTexts: controllerChannelCount.setDescription('Number of channels on the controller.')
controllerReconstructRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 48), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerReconstructRate.setStatus('mandatory')
if mibBuilder.loadTexts: controllerReconstructRate.setDescription('The rate for reconstruct on the controller. ')
controllerPatrolReadRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 49), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPatrolReadRate.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPatrolReadRate.setDescription('The rate for patrol read on the controller. ')
controllerBGIRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 50), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerBGIRate.setStatus('mandatory')
if mibBuilder.loadTexts: controllerBGIRate.setDescription('The rate for background initialization on the controller. ')
controllerCheckConsistencyRate = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 51), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerCheckConsistencyRate.setStatus('mandatory')
if mibBuilder.loadTexts: controllerCheckConsistencyRate.setDescription('The rate for check consistency on the controller. ')
controllerPatrolReadMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("automatic", 1), ("manual", 2), ("disabled", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPatrolReadMode.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPatrolReadMode.setDescription('Identifies the patrol read mode. Possible values: 1: Automatic (enabled) 2: Manual (enabled) 3: Disabled ')
controllerPatrolReadState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 53), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8))).clone(namedValues=NamedValues(("stopped", 1), ("ready", 2), ("active", 4), ("aborted", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPatrolReadState.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPatrolReadState.setDescription('The state of the patrol read. 1: Stopped - not running 2: Ready - ready to start 4: Active - is running 8: Aborted - has aborted ')
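# --- Illustrative helper (not part of the generated MIB) ---------------------
# Sketch decoding controllerPatrolReadMode / controllerPatrolReadState, using
# only the named values declared in the two columns above.
PATROL_READ_MODES = {1: 'automatic', 2: 'manual', 3: 'disabled'}
PATROL_READ_STATES = {1: 'stopped', 2: 'ready', 4: 'active', 8: 'aborted'}

def describe_patrol_read(mode, state):
    """E.g. describe_patrol_read(1, 4) -> 'automatic/active'."""
    return '%s/%s' % (PATROL_READ_MODES.get(int(mode), '?'),
                      PATROL_READ_STATES.get(int(state), '?'))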
controllerPatrolReadIterations = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 54), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPatrolReadIterations.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPatrolReadIterations.setDescription('The number of times Patrol Read has been run on this controller.')
controllerStorportDriverVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 55), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerStorportDriverVersion.setStatus('mandatory')
if mibBuilder.loadTexts: controllerStorportDriverVersion.setDescription('Provide current Windows OS storport driver version. Not applicable for Linux.')
controllerMinRequiredStorportVer = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 56), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerMinRequiredStorportVer.setStatus('mandatory')
if mibBuilder.loadTexts: controllerMinRequiredStorportVer.setDescription('Provides minimum required storport driver version for Windows OS only. ')
controllerEncryptionCapable = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 57), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerEncryptionCapable.setStatus('mandatory')
if mibBuilder.loadTexts: controllerEncryptionCapable.setDescription('Indicates Encryption capability of the controller. Value: 1 - Capable, 99 - NotApplicable')
controllerEncryptionKeyPresent = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 58), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: controllerEncryptionKeyPresent.setStatus('mandatory')
if mibBuilder.loadTexts: controllerEncryptionKeyPresent.setDescription('Indicates presence of Encryption Key for the controller. Value: 1 - Yes, 0 - No, 99 - NotApplicable')
controllerPersistentHotSpare = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 59), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPersistentHotSpare.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPersistentHotSpare.setDescription('Indicates Persistent Hot Spare capability of the controller Value: 1 - Enabled, 0 - Disabled, 99 - Undetermined / Not applicable')
controllerSpinDownUnconfiguredDrives = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 60), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerSpinDownUnconfiguredDrives.setStatus('mandatory')
if mibBuilder.loadTexts: controllerSpinDownUnconfiguredDrives.setDescription('Indicates controller capability to put unconfigured drives in power save mode. Value: 1 - Enabled, 0 - Disabled, 99 - Undetermined / Not applicable')
controllerSpinDownHotSpareDrives = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 61), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerSpinDownHotSpareDrives.setStatus('mandatory')
if mibBuilder.loadTexts: controllerSpinDownHotSpareDrives.setDescription('Indicates controller capability to put hot spare drives in power save mode. Value: 1 - Enabled, 0 - Disabled, 99 - Undetermined / Not applicable')
controllerSpinDownTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 62), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1440))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: controllerSpinDownTimeInterval.setStatus('mandatory')
if mibBuilder.loadTexts: controllerSpinDownTimeInterval.setDescription('Shows the duration in minutes after which, the unconfigured or hot spare drives will be spun down to power save mode. Value: 30 to 1440 Note: A value of 9999 indicates feature not available.')
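# --- Illustrative helper (not part of the generated MIB) ---------------------
# controllerSpinDownTimeInterval is writable within 30..1440 minutes, with
# 9999 documented above as "feature not available". A minimal validation
# sketch before attempting an SNMP set:
SPIN_DOWN_NOT_AVAILABLE = 9999

def valid_spin_down_interval(minutes):
    """True only for the writable 30..1440 range; 9999 is a read-only sentinel."""
    return 30 <= int(minutes) <= 1440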
controllerEncryptionMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 63), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerEncryptionMode.setStatus('mandatory')
if mibBuilder.loadTexts: controllerEncryptionMode.setDescription('Indicates the current encryption mode of the controller. Value: 0 - No Encryption, 1 - Local Key Management (LKM), 2 - Dell Key Management (DKM), 99 - Not Applicable ')
controllerCacheCade = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 64), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerCacheCade.setStatus('mandatory')
if mibBuilder.loadTexts: controllerCacheCade.setDescription('Indicates if the controller is CacheCade capable or not. Value: 1 - Capable, 0 - Not Capable, 99 - Undetermined')
controllerSpinDownConfiguredDrives = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 65), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerSpinDownConfiguredDrives.setStatus('mandatory')
if mibBuilder.loadTexts: controllerSpinDownConfiguredDrives.setDescription('Indicates controller capability to spin down configured physical disks. Value: 0 - Disabled, 1 - Enabled, 99 - Undetermined ')
controllerAutomaticPowerSaving = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 66), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerAutomaticPowerSaving.setStatus('mandatory')
if mibBuilder.loadTexts: controllerAutomaticPowerSaving.setDescription('Indicates controller capability for automatic power saving. Value: 0 - Disabled, 1 - Enabled, 99 - Undetermined')
controllerConfiguredDrivesSpinUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 67), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerConfiguredDrivesSpinUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: controllerConfiguredDrivesSpinUpTime.setDescription('Indicates configured drives spin up start time. Value: 1:00 AM to 12:59 PM, 9999 - Undetermined')
controllerConfiguredDrivesSpinUpTimeInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 68), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1440))).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerConfiguredDrivesSpinUpTimeInterval.setStatus('mandatory')
if mibBuilder.loadTexts: controllerConfiguredDrivesSpinUpTimeInterval.setDescription('Indicates configured drives spin up time interval in hours. This value is added with configured drives start time to arrive at time window in which configured drives are always spin up. Value: 1 .. 24, 9999 - Undetermined')
controllerPreservedCache = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 69), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPreservedCache.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPreservedCache.setDescription('Indicates if preserved cache is present on the controller. Values: 1- Yes, 0 - No, 99 - Not available / Not applicable')
controllerPIEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 1, 1, 70), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: controllerPIEnable.setStatus('mandatory')
if mibBuilder.loadTexts: controllerPIEnable.setDescription('Indicates if T10 PI is enabled on a controller. Value can be either 0 (T10 PI disabled) or 1 (T10 PI enabled).')
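# --- Illustrative usage (not part of the generated MIB) ----------------------
# A minimal sketch of walking controllerRollUpStatus with pysnmp's hlapi,
# reusing the dell_status_name() helper above. The agent address
# ('demo.example.com') and community string ('public') are placeholder
# assumptions; point them at a real Dell OpenManage SNMP agent.
def walk_controller_rollup_status(host='demo.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    oid = ObjectIdentity('1.3.6.1.4.1.674.10893.1.20.130.1.1.37')
    for errInd, errStat, errIdx, varBinds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(oid), lexicographicMode=False):
        if errInd or errStat:
            break  # stop on the first transport or agent error
        for name, value in varBinds:
            yield name.prettyPrint(), dell_status_name(value)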
tapeDriveTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17), )
if mibBuilder.loadTexts: tapeDriveTable.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveTable.setDescription('A table of listed Tape Drives. The number of entries is related to number of Tape Drives discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
tapeDriveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1), ).setIndexNames((0, "StorageManagement-MIB", "tapeDriveNumber"))
if mibBuilder.loadTexts: tapeDriveEntry.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveEntry.setDescription('An entry in the Tape Drive table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
tapeDriveNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveNumber.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveNumber.setDescription('Instance number of this tape drive entry.')
tapeDriveName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveName.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveName.setDescription('The name of the tape drive as represented in Storage Management.')
tapeDriveVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveVendor.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveVendor.setDescription("The tape drive's manufacturer's name.")
tapeDriveProductID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveProductID.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveProductID.setDescription('The model number of the tape drive')
tapeDriveNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveNexusID.setDescription('Durable unique ID for this tape drive')
tapeDriveBusType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(8))).clone(namedValues=NamedValues(("sas", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveBusType.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveBusType.setDescription('The bus type of the tape drive. Possible values: 8. SAS')
tapeDriveSASAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveSASAddress.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveSASAddress.setDescription('The specified SAS address if this is a SAS tape drive. ')
tapeDriveMediaType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 130, 17, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4))).clone(namedValues=NamedValues(("tape", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tapeDriveMediaType.setStatus('mandatory')
if mibBuilder.loadTexts: tapeDriveMediaType.setDescription('The Media type of the tape drive. Possible Values: 4:Tape')
logicalDevices = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140))
arrayDiskLogicalConnectionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3), )
if mibBuilder.loadTexts: arrayDiskLogicalConnectionTable.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionTable.setDescription('A table of the connections between array disks, the virtual disk to which they belong, and their associated logical disk. For each object in the table, its object number corresponds to an instance number in the appropriate MIB table for that object where all of the object properties can be found. ')
arrayDiskLogicalConnectionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1), ).setIndexNames((0, "StorageManagement-MIB", "arrayDiskLogicalConnectionNumber"))
if mibBuilder.loadTexts: arrayDiskLogicalConnectionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionEntry.setDescription('An entry in the Array Disk Logical Connection table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
arrayDiskLogicalConnectionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionNumber.setDescription('Instance number of this array disk logical connection entry.')
arrayDiskLogicalConnectionArrayDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionArrayDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionArrayDiskName.setDescription('The name of the array disk in this logical connection.')
arrayDiskLogicalConnectionArrayDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionArrayDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionArrayDiskNumber.setDescription('The instance number of the array disk in this logical connection.')
arrayDiskLogicalConnectionVirtualDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionVirtualDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionVirtualDiskName.setDescription('The name of the virtual disk to which this array disk belongs.')
arrayDiskLogicalConnectionVirtualDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionVirtualDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionVirtualDiskNumber.setDescription('The instance number of the virtual disk to which this array disk belongs.')
arrayDiskLogicalConnectionDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionDiskName.setDescription('The name of the disk group to which this array disk belongs. This property is currently not supported.')
arrayDiskLogicalConnectionDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: arrayDiskLogicalConnectionDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: arrayDiskLogicalConnectionDiskNumber.setDescription('The instance number of the disk group to which this array disk belongs. This property is currently not supported.')
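# --- Illustrative helper (not part of the generated MIB) ---------------------
# Sketch of joining rows of the array disk logical connection table into
# (array disk name, virtual disk name) pairs, assuming each row has been
# fetched into a dict keyed by the column names defined above.
def logical_connection_pairs(rows):
    """rows: iterable of dicts carrying the two ...Name columns; yields tuples."""
    for row in rows:
        yield (row['arrayDiskLogicalConnectionArrayDiskName'],
               row['arrayDiskLogicalConnectionVirtualDiskName'])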
virtualDiskTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1), )
if mibBuilder.loadTexts: virtualDiskTable.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskTable.setDescription('A table of managed Virtual Disks. The number of entries is related to number of Virtual Disks discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
virtualDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1), ).setIndexNames((0, "StorageManagement-MIB", "virtualDiskNumber"))
if mibBuilder.loadTexts: virtualDiskEntry.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskEntry.setDescription('An entry in the Virtual Disk table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
virtualDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100000000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskNumber.setDescription('Instance number of this virtual disk entry.')
virtualDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskName.setDescription("The virtual disk's label generated by Storage Management or entered by the user.")
virtualDiskDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskDeviceName.setDescription("Device name used by this virtual disk's member disks.")
virtualDiskState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 6, 7, 15, 16, 18, 24, 26, 32, 35, 36, 52))).clone(namedValues=NamedValues(("ready", 1), ("failed", 2), ("online", 3), ("offline", 4), ("degraded", 6), ("verifying", 7), ("resynching", 15), ("regenerating", 16), ("failedRedundancy", 18), ("rebuilding", 24), ("formatting", 26), ("reconstructing", 32), ("initializing", 35), ("backgroundInit", 36), ("permanentlyDegraded", 52)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskState.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskState.setDescription('The current condition of this virtual disk (which includes any member array disks.) Possible states: 0: Unknown 1: Ready - The disk is accessible and has no known problems. 2: Failed - Access has been lost to the data or is about to be lost. 3: Online 4: Offline - The disk is not accessible. The disk may be corrupted or intermittently unavailable. 6: Degraded - The data on the virtual disk is no longer fault tolerant because one of the underlying disks is not online. 15: Resynching 16: Regenerating 24: Rebuilding 26: Formatting 32: Reconstructing 35: Initializing 36: Background Initialization 38: Resynching Paused 52: Permanently Degraded 54: Degraded Redundancy')
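# --- Illustrative helper (not part of the generated MIB) ---------------------
# Sketch mapping virtualDiskState codes to names, restricted to the values
# actually declared in the column's named-values list above.
VIRTUAL_DISK_STATES = {
    1: 'ready', 2: 'failed', 3: 'online', 4: 'offline', 6: 'degraded',
    7: 'verifying', 15: 'resynching', 16: 'regenerating',
    18: 'failedRedundancy', 24: 'rebuilding', 26: 'formatting',
    32: 'reconstructing', 35: 'initializing', 36: 'backgroundInit',
    52: 'permanentlyDegraded',
}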
virtualDiskSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("warning", 1), ("error", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskSeverity.setStatus('obsolete')
if mibBuilder.loadTexts: virtualDiskSeverity.setDescription('This entry is obsolete for Storage Management. It was replaced with RollUpStatus and ComponentStatus for each device.')
virtualDiskLengthInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskLengthInMB.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskLengthInMB.setDescription('The size of this virtual disk in megabytes. If this size is 0, it is smaller than a megabyte.')
virtualDiskLengthInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskLengthInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskLengthInBytes.setDescription('The portion of the virtual disk in bytes that is smaller than a megabyte. This size plus the virtualDiskLengthInMB is the total size of the virtual disk.')
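# --- Illustrative helper (not part of the generated MIB) ---------------------
# virtualDiskLengthInMB holds whole megabytes and virtualDiskLengthInBytes the
# sub-megabyte remainder, so per the descriptions above the total size is a
# simple sum:
def virtual_disk_size_bytes(length_in_mb, length_in_bytes):
    """E.g. virtual_disk_size_bytes(1024, 512) -> 1073742336."""
    return length_in_mb * 1024 * 1024 + length_in_bytes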
virtualDiskFreeSpaceInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskFreeSpaceInMB.setStatus('obsolete')
if mibBuilder.loadTexts: virtualDiskFreeSpaceInMB.setDescription('This entry is obsolete. This property is not supported for virtual disks managed under Storage Management.')
virtualDiskFreeSpaceInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskFreeSpaceInBytes.setStatus('obsolete')
if mibBuilder.loadTexts: virtualDiskFreeSpaceInBytes.setDescription('This entry is obsolete. This property is not supported for virtual disks managed under Storage Management.')
virtualDiskWritePolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 9))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("writeBack", 3), ("writeThrough", 4), ("enabledAlways", 5), ("enabledAlwaysSAS", 6), ("notApplicable", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskWritePolicy.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskWritePolicy.setDescription("Indicates whether the controller's write cache will be used when writing to a virtual disk. Possible values: 1: Enabled - Adaptec Write Cache Enabled Protected 2: Disabled - Adaptec Write Cache Disabled 3: LSI Write Back 4: LSI Write Through 5: Enabled Always - Adaptec only 6: Enabled Always - SAS only 9: Not Applicable")
virtualDiskReadPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 9))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("readAhead", 3), ("adaptiveReadAhead", 4), ("noReadAhead", 5), ("notApplicable", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskReadPolicy.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskReadPolicy.setDescription("Indicates whether the controller's read cache will be used when reading from a virtual disk. Possible values: 1: Enabled - Adaptec Read Cache Enabled 2: Disabled - Adaptec Read Cache Disabled 3: LSI Read Ahead 4: LSI Adaptive Read Ahead 5: LSI No Read Ahead 9: Not Applicable")
virtualDiskCachePolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 99))).clone(namedValues=NamedValues(("directIO", 1), ("cachedIO", 2), ("not-applicable", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskCachePolicy.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskCachePolicy.setDescription("Indicates whether the controller's cache is used when reading from or writing to a virtual disk. Possible values: 1: Direct I/O (LSI) 2: Cached I/O (LSI) 99: Not applicable")
virtualDiskLayout = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 20, 21, 22, 23, 24, 25))).clone(namedValues=NamedValues(("concatenated", 1), ("raid-0", 2), ("raid-1", 3), ("raid-2", 4), ("raid-3", 5), ("raid-4", 6), ("raid-5", 7), ("raid-6", 8), ("raid-7", 9), ("raid-10", 10), ("raid-30", 11), ("raid-50", 12), ("addSpares", 13), ("deleteLogical", 14), ("transformLogical", 15), ("raid-0-plus-1", 18), ("concatRaid-1", 19), ("concatRaid-5", 20), ("noRaid", 21), ("volume", 22), ("raidMorph", 23), ("raid-60", 24), ("cacheCade", 25)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskLayout.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskLayout.setDescription("The virtual disk's RAID type. Currently supported types: 1: Concatenated 2: RAID-0 3: RAID-1 7: RAID-5 8: RAID-6 10: RAID-10 12: RAID-50 19: Concatenated RAID 1 24: RAID-60 25: CacheCade ")
virtualDiskCurStripeSizeInMB = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskCurStripeSizeInMB.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskCurStripeSizeInMB.setDescription('The stripe size of this virtual disk in megabytes. If this size is 0, it is either smaller than a megabyte or not applicable. ')
virtualDiskCurStripeSizeInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskCurStripeSizeInBytes.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskCurStripeSizeInBytes.setDescription('The portion of the stripe size in bytes that is smaller than a megabyte. This size plus the virtualDiskCurStripeSizeInMB is the total stripe size on the virtual disk. If this size is 0, either the entire size is whole megabytes or it is not applicable.')
virtualDiskChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskChannel.setStatus('obsolete')
if mibBuilder.loadTexts: virtualDiskChannel.setDescription('This entry is obsolete. This property is not supported by virtual disks managed under Storage Management.')
virtualDiskTargetID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskTargetID.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskTargetID.setDescription('Unique ID for the virtual disk.')
virtualDiskLunID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskLunID.setStatus('obsolete')
if mibBuilder.loadTexts: virtualDiskLunID.setDescription('This entry is obsolete. This property is not supported by virtual disks managed under Storage Management.')
virtualDiskRollUpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 19), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskRollUpStatus.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskRollUpStatus.setDescription('Severity of the virtual disk state. This is the combined status of the virtual disk and its components. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
virtualDiskComponentStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 20), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskComponentStatus.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskComponentStatus.setDescription('The status of the virtual disk itself without the propagation of any contained component status. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
virtualDiskNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 21), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskNexusID.setDescription('Durable unique ID for this virtual disk.')
virtualDiskArrayDiskType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 99))).clone(namedValues=NamedValues(("sas", 1), ("sata", 2), ("scsi", 3), ("ide", 4), ("unknown", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskArrayDiskType.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskArrayDiskType.setDescription('Identifies the type of array (physical) disks used to create the virtual disk. Possible values: 1: SAS 2: SATA 3: SCSI 4: IDE 99: Unknown ')
virtualDiskBadBlocksDetected = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskBadBlocksDetected.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskBadBlocksDetected.setDescription('Indicates if virtual disk has bad blocks. Value: 0 - No, 1 - Yes, 2 - Not Applicable, 99 - Unknown')
virtualDiskEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskEncrypted.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskEncrypted.setDescription('Indicates if virtual disk is encrypted. Value: 0 - No, 1 - Yes, 99 - Unknown.')
virtualDiskIsCacheCade = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskIsCacheCade.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskIsCacheCade.setDescription('Indicates if this virtual disk is configured as CacheCade. Value: 1 - Yes, 0 - No, 99 - Undetermined')
virtualDiskDiskCachePolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskDiskCachePolicy.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskDiskCachePolicy.setDescription('Indicates disk cache policy of the logical device. Value: 1 - Enabled, 2 - Disabled, 99 - Undetermined')
virtualDiskAssociatedFluidCacheStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 27), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskAssociatedFluidCacheStatus.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskAssociatedFluidCacheStatus.setDescription('Indicates the status of the associated fluid cache.')
virtualDiskPIEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 1, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPIEnable.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPIEnable.setDescription('Indicates if T10 PI is enabled on a virtual disk. Possible values are: 0 (T10 PI disabled) or 1 (T10 PI enabled). ')
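# --- Illustrative usage (not part of the generated MIB) ----------------------
# Sketch of fetching virtualDiskName and virtualDiskState for a single row via
# pysnmp's getCmd, reusing the VIRTUAL_DISK_STATES map above. The host,
# community, and row index (1) are placeholder assumptions.
def get_virtual_disk(host='demo.example.com', community='public', index=1):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    base = '1.3.6.1.4.1.674.10893.1.20.140.1.1'
    errInd, errStat, errIdx, varBinds = next(getCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('%s.2.%d' % (base, index))),   # virtualDiskName
        ObjectType(ObjectIdentity('%s.4.%d' % (base, index)))))  # virtualDiskState
    if errInd or errStat:
        return None
    (_, name), (_, state) = varBinds
    return str(name), VIRTUAL_DISK_STATES.get(int(state), 'unknown')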
virtualDiskPartitionTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2), )
if mibBuilder.loadTexts: virtualDiskPartitionTable.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionTable.setDescription('A table of managed Virtual Disk Partitions. The number of entries is related to number of partitions discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries.')
virtualDiskPartitionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1), ).setIndexNames((0, "StorageManagement-MIB", "virtualDiskPartitionNumber"))
if mibBuilder.loadTexts: virtualDiskPartitionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionEntry.setDescription('An entry in the Virtual Disk Partition table. A row in this table cannot be created or deleted by SNMP operations on columns of the table.')
virtualDiskPartitionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionNumber.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionNumber.setDescription('Instance number of this partition entry')
virtualDiskPartitionDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionDeviceName.setDescription('Device name of the partition given by the operating system.')
virtualDiskPartitionState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("active", 1), ("no", 2), ("removing", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionState.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionState.setDescription('State of the partition. This is the mapped state of the associated virtual disk. 1: Active - Mapped cache disk is working fine 2: No - Fluid caching is not enabled 3: Removing - This is a transient stage during the process of disabling the cache 4: Failed - Mapped cache disk has failed.')
virtualDiskPartitionSize = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionSize.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionSize.setDescription('Size of the Partition in GB.')
virtualDiskPartitionFluidCacheStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionFluidCacheStatus.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionFluidCacheStatus.setDescription('Indicates if the partition has an associated fluid cache.')
virtualDiskPartitionNexusID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 2, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: virtualDiskPartitionNexusID.setStatus('mandatory')
if mibBuilder.loadTexts: virtualDiskPartitionNexusID.setDescription('Durable unique ID for this partition. This comprises the controllerID, virtualDisk ID and hash mapped WWN number of this partition')
fluidCacheTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4), )
if mibBuilder.loadTexts: fluidCacheTable.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheTable.setDescription('A table of managed FluidCache. The number of entries is related to FluidCache discovered in the system. The maximum number of entries is implementation dependent. Note: The properties in this table may not be applicable to all entries. ')
fluidCacheEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1), ).setIndexNames((0, "StorageManagement-MIB", "fluidCacheNumber"))
if mibBuilder.loadTexts: fluidCacheEntry.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheEntry.setDescription('An entry in the Fluid Cache table. A row in this table cannot be created or deleted by SNMP operations on columns of the table. ')
fluidCacheNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheNumber.setDescription('Instance number of this fluid cache entry')
fluidCacheName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheName.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheName.setDescription('The name of the fluidcache in this subsystem as represented in Storage Management. ')
fluidCacheLicenseState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseState.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseState.setDescription('License state of the associated fluid cache subsystem')
fluidCacheLicenseValidity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseValidity.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseValidity.setDescription("This entry displays the number of days the fluid cache license is valid. It has to be read in parallel with the license type. In case of a site-wide license, the value '0' should be read as 'Not Applicable'.")
fluidCacheLicenseEntitlementID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseEntitlementID.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseEntitlementID.setDescription('Indicates the Entitlement Identifier for the license of the fluid cache subsystem.')
fluidCacheLicenseDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseDuration.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseDuration.setDescription('Indicates the duration of the license validity.')
fluidCacheLicenseCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 7), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseCapacity.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseCapacity.setDescription('Indicates the capacity of the license of fluidcache subsystem')
fluidCacheLicenseRemaining = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 8), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: fluidCacheLicenseRemaining.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseRemaining.setDescription('Indicates the remaining days of the license validity')
fluidCacheLicenseType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseType.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseType.setDescription('Indicates the type of the license of the fluidcache subsystem')
fluidCacheLicenseVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseVendor.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseVendor.setDescription('Indicates the license vendor for the fluidcache subsystem')
fluidCacheLicenseProductId = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 11), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseProductId.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseProductId.setDescription('Indicates the product ID of the license of the fluidcache subsystem.')
fluidCacheLicenseDateSold = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 12), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseDateSold.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseDateSold.setDescription('Indicates the date on which the license for the fluidcache subsystem is sold')
fluidCacheLicenseGeneration = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 13), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseGeneration.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseGeneration.setDescription('Indicates the generation of the license for the fluidcache subsystem')
fluidCacheLicenseFeatureID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 14), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseFeatureID.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseFeatureID.setDescription('Indicates the license feature ID of the fluidcache subsystem')
fluidCacheLicenseFeatureDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 15), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheLicenseFeatureDescription.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheLicenseFeatureDescription.setDescription('Provides the description of the license feature of fluidcache subsystem')
fluidCacheNexus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 4, 1, 16), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheNexus.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheNexus.setDescription('Indicates the unique ID of the fluidcache component.')
fluidCacheDiskTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5), )
if mibBuilder.loadTexts: fluidCacheDiskTable.setStatus('mandatory')
fluidCacheDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1), ).setIndexNames((0, "StorageManagement-MIB", "fluidCacheDiskNumber"))
if mibBuilder.loadTexts: fluidCacheDiskEntry.setStatus('mandatory')
fluidCacheDiskNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskNumber.setDescription('Instance number of the fluidCacheDisk entry')
fluidCacheDiskName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskName.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskName.setDescription('Name of the Fluid Cache Disk Name as seen by storage management')
fluidCacheDiskState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskState.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskState.setDescription('State of the FluidCacheDisk entry. Valid values are 2 - Active; otherwise Inactive.')
fluidCacheDiskBackendDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceType.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceType.setDescription('This displays the type of the backend device beneath the fluid cache disk. Values are as below: 773 denotes VirtualDisk, 791 denotes Partition.')
fluidCacheDiskBackendDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceName.setDescription('Device name of the backend device as seen by the operating system')
fluidCacheDiskBackendDeviceSize = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceSize.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskBackendDeviceSize.setDescription('Size of the Backend device. Size is indicated in GB')
fluidCacheDiskOperatingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskOperatingMode.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskOperatingMode.setDescription('Operating mode of the FluidCache. Values are 0 - Writeback mode 1 - Writethrough mode 2 - PassThrough mode ')
fluidCacheDiskConfiguredMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskConfiguredMode.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskConfiguredMode.setDescription('Configured Mode of the FluidCache. The values are 0 - Writeback mode 1 - Writethrough mode 2 - PassThrough mode ')
fluidCacheDiskNexus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 9), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: fluidCacheDiskNexus.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskNexus.setDescription('Durable Unique ID of the fluidcache disk')
fluidCacheDiskStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 5, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCacheDiskStatus.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCacheDiskStatus.setDescription('Severity of the fluid cache disk state. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
fluidCachePoolTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6), )
if mibBuilder.loadTexts: fluidCachePoolTable.setStatus('mandatory')
fluidCachePoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1), ).setIndexNames((0, "StorageManagement-MIB", "fluidCachePoolNumber"))
if mibBuilder.loadTexts: fluidCachePoolEntry.setStatus('mandatory')
fluidCachePoolNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolNumber.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolNumber.setDescription('Instance number of the fluid cachepool entry')
fluidCachePoolStoreCount = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolStoreCount.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolStoreCount.setDescription('Number of flash devices that are part of the fluidcache pool')
fluidCachePoolUUID = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolUUID.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolUUID.setDescription('UUID of the fluidcache pool')
fluidCachePoolLicenseState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolLicenseState.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolLicenseState.setDescription('State of the fluid cache license. Possible values: Valid, Evaluation, Expired ')
fluidCachePoolSize = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolSize.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolSize.setDescription('Size of the fluidcache pool in GiB')
fluidCachePoolHighAvailabilityState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolHighAvailabilityState.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolHighAvailabilityState.setDescription('Indicates if the fluidcache pool is operating in high availability mode.')
fluidCachePoolNexus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 7), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolNexus.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolNexus.setDescription('Durable unique ID of the fluidcache pool entry')
fluidCachePoolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 140, 6, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fluidCachePoolStatus.setStatus('mandatory')
if mibBuilder.loadTexts: fluidCachePoolStatus.setDescription('Severity of the fluid cache pool table state. Possible values: 1: Other 2: Unknown 3: OK 4: Non-critical 5: Critical 6: Non-recoverable')
storageManagementEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200))
messageIDEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: messageIDEvent.setStatus('mandatory')
if mibBuilder.loadTexts: messageIDEvent.setDescription('0200.0001 Storage Management alert (event) message number.')
descriptionEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: descriptionEvent.setStatus('mandatory')
if mibBuilder.loadTexts: descriptionEvent.setDescription('0200.0002 Storage Management event message text describing the alert.')
locationEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: locationEvent.setStatus('mandatory')
if mibBuilder.loadTexts: locationEvent.setDescription('0200.0003 Additional information identifying the location of the object causing the alert.')
objectNameEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: objectNameEvent.setStatus('mandatory')
if mibBuilder.loadTexts: objectNameEvent.setDescription('0200.0004 Name of the object as represented in Storage Management causing the alert.')
objectOIDEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: objectOIDEvent.setStatus('mandatory')
if mibBuilder.loadTexts: objectOIDEvent.setDescription('0200.0005 MIB OID of the object causing the alert.')
objectNexusEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: objectNexusEvent.setStatus('mandatory')
if mibBuilder.loadTexts: objectNexusEvent.setDescription('0200.0006 Durable, unique ID of the object causing the alert.')
currentStatusEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 7), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: currentStatusEvent.setStatus('mandatory')
if mibBuilder.loadTexts: currentStatusEvent.setDescription('0200.0007 Current status of object causing the alert, if applicable.')
previousStatusEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 8), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: previousStatusEvent.setStatus('mandatory')
if mibBuilder.loadTexts: previousStatusEvent.setDescription('0200.0008 Previous status of object causing the alert if applicable.')
enhancedMessageIDEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: enhancedMessageIDEvent.setStatus('mandatory')
if mibBuilder.loadTexts: enhancedMessageIDEvent.setDescription('0200.0009 Enhanced Storage Management Message ID.')
systemFQDNEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: systemFQDNEvent.setStatus('mandatory')
if mibBuilder.loadTexts: systemFQDNEvent.setDescription('0200.0010 System FQDN of object causing the alert if applicable.')
serviceTagEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serviceTagEvent.setStatus('mandatory')
if mibBuilder.loadTexts: serviceTagEvent.setDescription('0200.0011 Service Tag of object causing the alert if applicable.')
chassisServiceTagEvent = MibScalar((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chassisServiceTagEvent.setStatus('mandatory')
if mibBuilder.loadTexts: chassisServiceTagEvent.setDescription('0200.0012 Chassis Service Tag of object causing the alert if applicable.')
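# --- Illustrative helper (not part of the generated MIB) ---------------------
# Sketch that picks the message ID and description out of a received trap's
# varbinds, matching the two event scalars defined above by OID prefix.
MESSAGE_ID_OID = (1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 1)
DESCRIPTION_OID = (1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 2)

def extract_event(var_binds):
    """var_binds: iterable of (oid, value) pairs; returns (messageID, description)."""
    msg_id = description = None
    for oid, value in var_binds:
        if tuple(oid)[:len(MESSAGE_ID_OID)] == MESSAGE_ID_OID:
            msg_id = int(value)
        elif tuple(oid)[:len(DESCRIPTION_OID)] == DESCRIPTION_OID:
            description = str(value)
    return msg_id, description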
alertStorageManagementInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,101)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"))
if mibBuilder.loadTexts: alertStorageManagementInformation.setDescription('Storage Management information. There is no global status change associated with this trap.')
alertStorageManagementNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,102)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"))
if mibBuilder.loadTexts: alertStorageManagementNormal.setDescription('There is no global status change associated with this trap.')
alertStorageManagementWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,103)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"))
if mibBuilder.loadTexts: alertStorageManagementWarning.setDescription('Storage Management has detected a device-independent warning condition. There is no global status change associated with this trap.')
alertStorageManagementFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,104)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"))
if mibBuilder.loadTexts: alertStorageManagementFailure.setDescription('Storage Management has detected a device-independent error condition. There is no global status change associated with this trap.')
alertStorageManagementNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"))
if mibBuilder.loadTexts: alertStorageManagementNonRecoverable.setDescription('Storage Management has detected a device-independent non-recoverable condition. There is no global status change associated with this trap.')
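# Device-specific legacy alert groups follow (controller, channel,
# enclosure, array disk, EMM, power supply, temperature probe, fan,
# battery, virtual disk). Each group defines five severities at
# consecutive trap sub-identifiers, ending in 1 (information), 2 (normal),
# 3 (warning), 4 (failure) and 5 (non-recoverable), and all bind the same
# eight event varbinds declared above.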
alertControllerInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,751)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertControllerInformation.setDescription('Controller information.')
alertControllerNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,752)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertControllerNormal.setDescription('Controller has returned to normal.')
alertControllerWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,753)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertControllerWarning.setDescription('Controller warning.')
alertControllerFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,754)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertControllerFailure.setDescription('Controller failure.')
alertControllerNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,755)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertControllerNonRecoverable.setDescription('Controller is non-recoverable.')
alertChannelInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,801)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertChannelInformation.setDescription('Channel information.')
alertChannelNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,802)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertChannelNormal.setDescription('Channel has returned to normal.')
alertChannelWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,803)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertChannelWarning.setDescription('Channel warning.')
alertChannelFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,804)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertChannelFailure.setDescription('Channel failure.')
alertChannelNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,805)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertChannelNonRecoverable.setDescription('Channel is non-recoverable.')
alertEnclosureInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,851)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEnclosureInformation.setDescription('Enclosure information.')
alertEnclosureNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,852)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEnclosureNormal.setDescription('Enclosure has returned to normal.')
alertEnclosureWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,853)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEnclosureWarning.setDescription('Enclosure warning.')
alertEnclosureFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,854)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEnclosureFailure.setDescription('Enclosure failure.')
alertEnclosureNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,855)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEnclosureNonRecoverable.setDescription('Enclosure is non-recoverable.')
alertArrayDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,901)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertArrayDiskInformation.setDescription('Array disk information.')
alertArrayDiskNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,902)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertArrayDiskNormal.setDescription('Array disk has returned to normal.')
alertArrayDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,903)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertArrayDiskWarning.setDescription('Array disk warning.')
alertArrayDiskFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,904)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertArrayDiskFailure.setDescription('Array disk failure.')
alertArrayDiskNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,905)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertArrayDiskNonRecoverable.setDescription('Array disk is non-recoverable.')
alertEMMInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,951)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEMMInformation.setDescription('EMM information.')
alertEMMNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,952)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEMMNormal.setDescription('EMM has returned to normal.')
alertEMMWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,953)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEMMWarning.setDescription('EMM warning.')
alertEMMFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,954)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEMMFailure.setDescription('EMM failure.')
alertEMMNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,955)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertEMMNonRecoverable.setDescription('EMM is non-recoverable.')
alertPowerSupplyInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1001)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertPowerSupplyInformation.setDescription('Power supply information.')
alertPowerSupplyNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1002)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertPowerSupplyNormal.setDescription('Power supply has returned to normal.')
alertPowerSupplyWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1003)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertPowerSupplyWarning.setDescription('Power supply warning.')
alertPowerSupplyFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1004)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertPowerSupplyFailure.setDescription('Power supply failure.')
alertPowerSupplyNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1005)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertPowerSupplyNonRecoverable.setDescription('Power supply is non-recoverable.')
alertTemperatureProbeInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1051)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertTemperatureProbeInformation.setDescription('Temperature probe information.')
alertTemperatureProbeNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1052)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertTemperatureProbeNormal.setDescription('Temperature probe has returned to normal.')
alertTemperatureProbeWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1053)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertTemperatureProbeWarning.setDescription('Temperature probe warning.')
alertTemperatureProbeFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1054)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertTemperatureProbeFailure.setDescription('Temperature probe failure.')
alertTemperatureProbeNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1055)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertTemperatureProbeNonRecoverable.setDescription('Temperature probe is non-recoverable.')
alertFanInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1101)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFanInformation.setDescription('Fan information.')
alertFanNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1102)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFanNormal.setDescription('Fan has returned to normal.')
alertFanWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1103)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFanWarning.setDescription('Fan warning.')
alertFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1104)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFanFailure.setDescription('Fan failure.')
alertFanNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1105)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFanNonRecoverable.setDescription('Fan is non-recoverable.')
alertBatteryInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1151)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertBatteryInformation.setDescription('Battery information.')
alertBatteryNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1152)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertBatteryNormal.setDescription('Battery has returned to normal.')
alertBatteryWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1153)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertBatteryWarning.setDescription('Battery warning.')
alertBatteryFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1154)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertBatteryFailure.setDescription('Battery failure.')
alertBatteryNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1155)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertBatteryNonRecoverable.setDescription('Battery is non-recoverable.')
alertVirtualDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1201)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskInformation.setDescription('Virtual disk information.')
alertVirtualDiskNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1202)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskNormal.setDescription('Virtual disk has returned to normal.')
alertVirtualDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1203)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskWarning.setDescription('Virtual disk warning.')
alertVirtualDiskFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1204)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskFailure.setDescription('Virtual disk failure.')
alertVirtualDiskNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1205)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskNonRecoverable.setDescription('Virtual disk is non-recoverable.')
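# The redundancy group departs from the five-severity pattern: only
# normal (1304), degraded (1305) and lost (1306) are defined.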
alertRedundancyNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1304)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertRedundancyNormal.setDescription('Redundancy has returned to normal.')
alertRedundancyDegraded = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1305)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertRedundancyDegraded.setDescription('Redundancy has been degraded.')
alertRedundancyLost = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1306)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertRedundancyLost.setDescription('Redundancy has been lost.')
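# The Fluid Cache Disk, Virtual Disk Partition, Fluid Cache and Fluid
# Cache Pool groups define only information (xx01), warning (xx03) and
# failure (xx04) traps. The mixed-case identifiers below (e.g.
# alertfluidCacheDiskWarning, alertFluidCacheDisklFailure) reproduce the
# spelling published in the vendor MIB and are kept verbatim so that
# symbol lookups continue to work.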
alertFluidCacheDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1401)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCacheDiskInformation.setDescription('Fluid Cache Disk information.')
alertfluidCacheDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1403)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertfluidCacheDiskWarning.setDescription('Fluid Cache Disk warning.')
alertFluidCacheDisklFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1404)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCacheDisklFailure.setDescription('Fluid Cache Disk failure.')
alertVirtualDiskPartitionInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1501)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskPartitionInformation.setDescription('Virtual Disk Partition information.')
alertVirtualDiskPartitionWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1503)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskPartitionWarning.setDescription('Virtual Disk Partition warning.')
alertVirtualDiskPartitionFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1504)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertVirtualDiskPartitionFailure.setDescription('Virtual Disk Partition failure.')
alertFluidCacheInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1601)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCacheInformation.setDescription('Fluid Cache information.')
alertfluidCacheWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1603)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertfluidCacheWarning.setDescription('Fluid Cache warning.')
alertFluidCacheFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1604)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCacheFailure.setDescription('Fluid Cache failure.')
alertFluidCachePoolInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1701)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCachePoolInformation.setDescription('Fluid Cache Pool information.')
alertfluidCachePoolWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1703)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertfluidCachePoolWarning.setDescription('Fluid Cache Pool warning.')
alertFluidCachePoolFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,1704)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"))
if mibBuilder.loadTexts: alertFluidCachePoolFailure.setDescription('Fluid Cache Pool failure.')
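# EEMI trap variants follow (the name presumably refers to the enhanced
# message ID these traps carry). Each reuses the legacy trap number
# multiplied by 100 (e.g. 754 becomes 75400) and, in addition to the
# legacy varbinds, binds enhancedMessageIDEvent, systemFQDNEvent,
# serviceTagEvent and chassisServiceTagEvent.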
alertEEMIStorageManagementInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,10100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIStorageManagementInformation.setDescription('Storage Management information. There is no global status change associated with this trap.')
alertEEMIStorageManagementNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,10200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIStorageManagementNormal.setDescription('Storage Management has returned to normal. There is no global status change associated with this trap.')
alertEEMIStorageManagementWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,10300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIStorageManagementWarning.setDescription('Storage Management has detected a device-independent warning condition. There is no global status change associated with this trap.')
alertEEMIStorageManagementFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,10400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIStorageManagementFailure.setDescription('Storage Management has detected a device-independent error condition. There is no global status change associated with this trap.')
alertEEMIStorageManagementNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,10500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIStorageManagementNonRecoverable.setDescription('Storage Management has detected a device-independent non-recoverable condition. There is no global status change associated with this trap.')
alertEEMIControllerInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,75100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIControllerInformation.setDescription('Controller information.')
alertEEMIControllerNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,75200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIControllerNormal.setDescription('Controller has returned to normal.')
alertEEMIControllerWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,75300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIControllerWarning.setDescription('Controller warning.')
alertEEMIControllerFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,75400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIControllerFailure.setDescription('Controller failure.')
alertEEMIControllerNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,75500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIControllerNonRecoverable.setDescription('Controller is non-recoverable.')
alertEEMIChannelInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,80100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIChannelInformation.setDescription('Channel information.')
alertEEMIChannelNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,80200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIChannelNormal.setDescription('Channel has returned to normal.')
alertEEMIChannelWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,80300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIChannelWarning.setDescription('Channel warning.')
alertEEMIChannelFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,80400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIChannelFailure.setDescription('Channel failure.')
alertEEMIChannelNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,80500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIChannelNonRecoverable.setDescription('Channel is non-recoverable.')
alertEEMIEnclosureInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,85100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEnclosureInformation.setDescription('Enclosure information.')
alertEEMIEnclosureNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,85200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEnclosureNormal.setDescription('Enclosure has returned to normal.')
alertEEMIEnclosureWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,85300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEnclosureWarning.setDescription('Enclosure warning.')
alertEEMIEnclosureFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,85400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEnclosureFailure.setDescription('Enclosure failure.')
alertEEMIEnclosureNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,85500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEnclosureNonRecoverable.setDescription('Enclosure is non-recoverable.')
alertEEMIArrayDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,90100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIArrayDiskInformation.setDescription('Array disk information.')
alertEEMIArrayDiskNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,90200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIArrayDiskNormal.setDescription('Array disk has returned to normal.')
alertEEMIArrayDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,90300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIArrayDiskWarning.setDescription('Array disk warning.')
alertEEMIArrayDiskFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,90400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIArrayDiskFailure.setDescription('Array disk failure.')
alertEEMIArrayDiskNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,90500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIArrayDiskNonRecoverable.setDescription('Array disk is non-recoverable.')
alertEMMEMMInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,95100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEMMEMMInformation.setDescription('EMM information.')
alertEEMIEMMNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,95200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEMMNormal.setDescription('EMM has returned to normal.')
alertEEMIEMMWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,95300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEMMWarning.setDescription('EMM warning.')
alertEEMIEMMFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,95400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEMMFailure.setDescription('EMM failure.')
alertEEMIEMMNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,95500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIEMMNonRecoverable.setDescription('EMM is non-recoverable.')
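# Power supply EEMI alerts (trap identifiers 100100-100500).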
alertEEMIPowerSupplyInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,100100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIPowerSupplyInformation.setDescription('Power supply information.')
alertEEMIPowerSupplyNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,100200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIPowerSupplyNormal.setDescription('Power supply has returned to normal.')
alertEEMIPowerSupplyWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,100300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIPowerSupplyWarning.setDescription('Power supply warning.')
alertEEMIPowerSupplyFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,100400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIPowerSupplyFailure.setDescription('Power supply failure.')
alertEEMIPowerSupplyNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,100500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIPowerSupplyNonRecoverable.setDescription('Power supply is non-recoverable.')
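# Temperature probe EEMI alerts (trap identifiers 105100-105500).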
alertEEMITemperatureProbeInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMITemperatureProbeInformation.setDescription('Temperature probe information.')
alertEEMITemperatureProbeNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMITemperatureProbeNormal.setDescription('Temperature probe has returned to normal.')
alertEEMITemperatureProbeWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMITemperatureProbeWarning.setDescription('Temperature probe warning.')
alertEEMITemperatureProbeFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMITemperatureProbeFailure.setDescription('Temperature probe failure.')
alertEEMITemperatureProbeNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,105500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMITemperatureProbeNonRecoverable.setDescription('Temperature probe is non-recoverable.')
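# Fan EEMI alerts (trap identifiers 110100-110500).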
alertEEMIFanInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,110100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFanInformation.setDescription('Fan information.')
alertEEMIFanNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,110200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFanNormal.setDescription('Fan has returned to normal.')
alertEEMIFanWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,110300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFanWarning.setDescription('Fan warning.')
alertEEMIFanFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,110400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFanFailure.setDescription('Fan failure.')
alertEEMIFanNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,110500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFanNonRecoverable.setDescription('Fan is non-recoverable.')
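# Battery EEMI alerts (trap identifiers 115100-115500).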
alertEEMIBatteryInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,115100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIBatteryInformation.setDescription('Battery information.')
alertEEMIBatteryNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,115200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIBatteryNormal.setDescription('Battery has returned to normal.')
alertEEMIBatteryWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,115300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIBatteryWarning.setDescription('Battery warning.')
alertEEMIBatteryFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,115400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIBatteryFailure.setDescription('Battery failure.')
alertEEMIBatteryNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,115500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIBatteryNonRecoverable.setDescription('Battery is non-recoverable.')
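# Virtual disk EEMI alerts (trap identifiers 120100-120500).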
alertEEMIVirtualDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,120100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskInformation.setDescription('Virtual disk information.')
alertEEMIVirtualDiskNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,120200)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskNormal.setDescription('Virtual disk has returned to normal.')
alertEEMIVirtualDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,120300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskWarning.setDescription('Virtual disk warning.')
alertEEMIVirtualDiskFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,120400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskFailure.setDescription('Virtual disk failure.')
alertEEMIVirtualDiskNonRecoverable = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,120500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskNonRecoverable.setDescription('Virtual disk is non-recoverable.')
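# Redundancy EEMI alerts (trap identifiers 130400-130600: normal/degraded/lost).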
alertEEMIRedundancyNormal = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,130400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIRedundancyNormal.setDescription('Redundancy has returned to normal.')
alertEEMIRedundancyDegraded = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,130500)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIRedundancyDegraded.setDescription('Redundancy has been degraded.')
alertEEMIRedundancyLost = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,130600)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIRedundancyLost.setDescription('Redundancy has been lost.')
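# Fluid Cache disk EEMI alerts (trap identifiers 140100/140300/140400).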
alertEEMIFluidCacheDiskInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,140100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCacheDiskInformation.setDescription('Fluid Cache Disk information.')
alertEEMIfluidCacheDiskWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,140300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIfluidCacheDiskWarning.setDescription('Fluid Cache Disk warning.')
alertEEMIFluidCacheDisklFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,140400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCacheDisklFailure.setDescription('Fluid Cache Disk failure.')
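# Virtual disk partition EEMI alerts (trap identifiers 150100/150300/150400).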
alertEEMIVirtualDiskPartitionInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,150100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskPartitionInformation.setDescription('Virtual Disk Partition information.')
alertEEMIVirtualDiskPartitionWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,150300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskPartitionWarning.setDescription('Virtual Disk Partition warning.')
alertEEMIVirtualDiskPartitionFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,150400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIVirtualDiskPartitionFailure.setDescription('Virtual Disk Partition failure.')
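# Fluid Cache EEMI alerts (trap identifiers 160100/160300/160400).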
alertEEMIFluidCacheInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,160100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCacheInformation.setDescription('Fluid Cache information.')
alertEEMIfluidCacheWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,160300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIfluidCacheWarning.setDescription('Fluid Cache warning.')
alertEEMIFluidCacheFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,160400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCacheFailure.setDescription('Fluid Cache failure.')
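# Fluid Cache pool EEMI alerts (trap identifiers 170100/170300/170400).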
alertEEMIFluidCachePoolInformation = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,170100)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCachePoolInformation.setDescription('Fluid Cache Pool information.')
alertEEMIfluidCachePoolWarning = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,170300)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIfluidCachePoolWarning.setDescription('Fluid Cache Pool warning.')
alertEEMIFluidCachePoolFailure = NotificationType((1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200) + (0,170400)).setObjects(("StorageManagement-MIB", "messageIDEvent"), ("StorageManagement-MIB", "descriptionEvent"), ("StorageManagement-MIB", "locationEvent"), ("StorageManagement-MIB", "objectNameEvent"), ("StorageManagement-MIB", "objectOIDEvent"), ("StorageManagement-MIB", "objectNexusEvent"), ("StorageManagement-MIB", "currentStatusEvent"), ("StorageManagement-MIB", "previousStatusEvent"), ("StorageManagement-MIB", "enhancedMessageIDEvent"), ("StorageManagement-MIB", "systemFQDNEvent"), ("StorageManagement-MIB", "serviceTagEvent"), ("StorageManagement-MIB", "chassisServiceTagEvent"))
if mibBuilder.loadTexts: alertEEMIFluidCachePoolFailure.setDescription('Fluid Cache Pool failure.')
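# The exportSymbols() calls below register every symbol defined by this
# module with the loading MibBuilder, so that pysnmp MIB views and trap
# receivers can resolve the notification OIDs above by name.
#
# A minimal lookup sketch (an illustration, not part of the generated
# module; it assumes pysnmp is installed and this compiled MIB is on the
# MibBuilder search path -- kept as a comment so the module still loads
# cleanly under pysnmp's MIB loader):
#
#   from pysnmp.smi import builder, view
#
#   mb = builder.MibBuilder()
#   mb.loadModules('StorageManagement-MIB')
#   mv = view.MibViewController(mb)
#   oid, label, suffix = mv.getNodeName(
#       (1, 3, 6, 1, 4, 1, 674, 10893, 1, 20, 200, 0, 100400))
#   print(label[-1])  # expected: 'alertEEMIPowerSupplyFailure'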
mibBuilder.exportSymbols("StorageManagement-MIB", powerSupplyConnectionPowersupplyNumber=powerSupplyConnectionPowersupplyNumber, alertFluidCacheInformation=alertFluidCacheInformation, fanComponentStatus=fanComponentStatus, virtualDiskPartitionNexusID=virtualDiskPartitionNexusID, virtualDiskCurStripeSizeInMB=virtualDiskCurStripeSizeInMB, virtualDiskPartitionSize=virtualDiskPartitionSize, arrayDiskLengthInBytes=arrayDiskLengthInBytes, controllerC0Target=controllerC0Target, arrayDiskFreeSpaceInBytes=arrayDiskFreeSpaceInBytes, virtualDiskFreeSpaceInMB=virtualDiskFreeSpaceInMB, channelEntry=channelEntry, alertEEMIStorageManagementNormal=alertEEMIStorageManagementNormal, temperatureConnectionEnclosureNumber=temperatureConnectionEnclosureNumber, arrayDiskLogicalConnectionNumber=arrayDiskLogicalConnectionNumber, controllerCacheCade=controllerCacheCade, enclosureOccupiedSlotCount=enclosureOccupiedSlotCount, controllerC1Port0ID=controllerC1Port0ID, arrayDiskEnclosureConnectionTable=arrayDiskEnclosureConnectionTable, controllerPatrolReadIterations=controllerPatrolReadIterations, temperatureProbeSeverity=temperatureProbeSeverity, batteryNextLearnTime=batteryNextLearnTime, alertEEMIPowerSupplyFailure=alertEEMIPowerSupplyFailure, temperatureConnectionTemperatureNumber=temperatureConnectionTemperatureNumber, fanTable=fanTable, controllerSeverity=controllerSeverity, virtualDiskLunID=virtualDiskLunID, virtualDiskPartitionNumber=virtualDiskPartitionNumber, enclosureAlarm=enclosureAlarm, fluidCachePoolSize=fluidCachePoolSize, batteryNumber=batteryNumber, softwareProduct=softwareProduct, fluidCacheNexus=fluidCacheNexus, virtualDiskPartitionState=virtualDiskPartitionState, channelName=channelName, arrayDiskRevision=arrayDiskRevision, alertRedundancyDegraded=alertRedundancyDegraded, enclosureSplitBusPartNumber=enclosureSplitBusPartNumber, enclosureBackplanePartNumber=enclosureBackplanePartNumber, alertFanWarning=alertFanWarning, arrayDiskTable=arrayDiskTable, controllerEncryptionKeyPresent=controllerEncryptionKeyPresent, alertEEMIEnclosureFailure=alertEEMIEnclosureFailure, fluidCacheLicenseState=fluidCacheLicenseState, enclosureAssetName=enclosureAssetName, arrayDiskBusType=arrayDiskBusType, alertTemperatureProbeFailure=alertTemperatureProbeFailure, alertPowerSupplyNonRecoverable=alertPowerSupplyNonRecoverable, virtualDiskNumber=virtualDiskNumber, alertEEMIChannelInformation=alertEEMIChannelInformation, temperatureProbeState=temperatureProbeState, globalData=globalData, fanName=fanName, arrayDiskLengthInMB=arrayDiskLengthInMB, alertEEMIEMMNormal=alertEEMIEMMNormal, enclosureProcessor2Version=enclosureProcessor2Version, alertfluidCachePoolWarning=alertfluidCachePoolWarning, alertEMMNonRecoverable=alertEMMNonRecoverable, objectNexusEvent=objectNexusEvent, enclosureName=enclosureName, controllerLogicalDeviceCount=controllerLogicalDeviceCount, alertRedundancyNormal=alertRedundancyNormal, alertEEMIFluidCacheDisklFailure=alertEEMIFluidCacheDisklFailure, alertEEMIChannelNonRecoverable=alertEEMIChannelNonRecoverable, arrayDiskEnclosureConnectionArrayDiskName=arrayDiskEnclosureConnectionArrayDiskName, DellStatus=DellStatus, arrayDiskFreeSpaceInMB=arrayDiskFreeSpaceInMB, alertEEMIRedundancyLost=alertEEMIRedundancyLost, arrayDiskDeviceName=arrayDiskDeviceName, alertVirtualDiskPartitionWarning=alertVirtualDiskPartitionWarning, arrayDiskDellCertified=arrayDiskDellCertified, channelTable=channelTable, controllerState=controllerState, tapeDriveBusType=tapeDriveBusType, fluidCacheLicenseVendor=fluidCacheLicenseVendor, 
arrayDiskManufactureDay=arrayDiskManufactureDay, batteryLearnMode=batteryLearnMode, temperatureProbeMaxWarning=temperatureProbeMaxWarning, alertVirtualDiskNormal=alertVirtualDiskNormal, alertEEMIPowerSupplyNormal=alertEEMIPowerSupplyNormal, virtualDiskLayout=virtualDiskLayout, controllerPersistentHotSpare=controllerPersistentHotSpare, arrayDiskLogicalConnectionEntry=arrayDiskLogicalConnectionEntry, arrayDiskRollUpStatus=arrayDiskRollUpStatus, alertBatteryNormal=alertBatteryNormal, arrayDiskChannelConnectionChannelNumber=arrayDiskChannelConnectionChannelNumber, alertEEMITemperatureProbeNormal=alertEEMITemperatureProbeNormal, alertFluidCachePoolInformation=alertFluidCachePoolInformation, temperatureProbeCurValue=temperatureProbeCurValue, controllerAlarmState=controllerAlarmState, controllerC0BatteryState=controllerC0BatteryState, arrayDiskVendor=arrayDiskVendor, alertVirtualDiskWarning=alertVirtualDiskWarning, systemFQDNEvent=systemFQDNEvent, batteryMaxChargeCount=batteryMaxChargeCount, arrayDiskNegotiatedLinkWidth=arrayDiskNegotiatedLinkWidth, alertPowerSupplyFailure=alertPowerSupplyFailure, fanConnectionEntry=fanConnectionEntry, powerSupplyConnectionTable=powerSupplyConnectionTable, fluidCacheDiskOperatingMode=fluidCacheDiskOperatingMode, alertfluidCacheDiskWarning=alertfluidCacheDiskWarning, alertEEMIPowerSupplyWarning=alertEEMIPowerSupplyWarning, controllerSpinDownTimeInterval=controllerSpinDownTimeInterval, alertEMMFailure=alertEMMFailure, batteryID=batteryID, alertTemperatureProbeWarning=alertTemperatureProbeWarning, alertEEMIChannelNormal=alertEEMIChannelNormal, fluidCachePoolStoreCount=fluidCachePoolStoreCount, enclosureType=enclosureType, temperatureConnectionTable=temperatureConnectionTable, controllerEncryptionCapable=controllerEncryptionCapable, alertTemperatureProbeNormal=alertTemperatureProbeNormal, alertEEMIFluidCacheFailure=alertEEMIFluidCacheFailure, temperatureConnectionEntry=temperatureConnectionEntry, controllerC0Port0ID=controllerC0Port0ID, controllerNexusID=controllerNexusID, alertEEMIEnclosureNonRecoverable=alertEEMIEnclosureNonRecoverable, fluidCacheLicenseRemaining=fluidCacheLicenseRemaining, enclosureManagementModuleConnectionEMMName=enclosureManagementModuleConnectionEMMName, alertPowerSupplyNormal=alertPowerSupplyNormal, batteryChargeCount=batteryChargeCount, enclosureNumber=enclosureNumber, alertEEMIControllerNonRecoverable=alertEEMIControllerNonRecoverable, powerSupplyConnectionFirmwareVersion=powerSupplyConnectionFirmwareVersion, alertBatteryFailure=alertBatteryFailure, enclosureState=enclosureState, powerSupplySeverity=powerSupplySeverity, fluidCacheLicenseType=fluidCacheLicenseType, alertEMMNormal=alertEMMNormal, batteryPredictedCapacity=batteryPredictedCapacity, physicalDevices=physicalDevices, alertEEMIEMMNonRecoverable=alertEEMIEMMNonRecoverable, fluidCacheDiskBackendDeviceSize=fluidCacheDiskBackendDeviceSize, alertEEMITemperatureProbeWarning=alertEEMITemperatureProbeWarning, virtualDiskRollUpStatus=virtualDiskRollUpStatus, enclosureProductID=enclosureProductID, objectOIDEvent=objectOIDEvent, powerSupplyComponentStatus=powerSupplyComponentStatus, agentHostname=agentHostname, displayName=displayName, fluidCacheDiskBackendDeviceType=fluidCacheDiskBackendDeviceType, controllerBGIRate=controllerBGIRate, powerSupplyTable=powerSupplyTable, virtualDiskIsCacheCade=virtualDiskIsCacheCade, tapeDriveName=tapeDriveName, controllerDriveChannelCount=controllerDriveChannelCount, channelTermination=channelTermination, fluidCacheDiskName=fluidCacheDiskName, 
software=software, alertControllerWarning=alertControllerWarning, enclosureManagementModuleNumber=enclosureManagementModuleNumber, alertFanNormal=alertFanNormal, batteryMaxLearnDelay=batteryMaxLearnDelay, storageManagementEvent=storageManagementEvent, enclosureManagementModuleVendor=enclosureManagementModuleVendor, tapeDriveVendor=tapeDriveVendor, fanVendor=fanVendor, controllerTable=controllerTable, virtualDiskNexusID=virtualDiskNexusID, temperatureProbeTable=temperatureProbeTable, alertFluidCacheDiskInformation=alertFluidCacheDiskInformation, controllerVendor=controllerVendor, controllerChannelCount=controllerChannelCount, agentVendor=agentVendor, alertTemperatureProbeNonRecoverable=alertTemperatureProbeNonRecoverable, enclosureManagementModuleName=enclosureManagementModuleName, alertStorageManagementNormal=alertStorageManagementNormal, enclosureManagementModuleConnectionEnclosureName=enclosureManagementModuleConnectionEnclosureName, alertEEMIFanNormal=alertEEMIFanNormal, alertEEMIFanWarning=alertEEMIFanWarning, controllerType=controllerType, fanProbeCurrValue=fanProbeCurrValue, arrayDiskNegotiatedSpeed=arrayDiskNegotiatedSpeed, virtualDiskState=virtualDiskState, virtualDiskPartitionDeviceName=virtualDiskPartitionDeviceName, fluidCachePoolStatus=fluidCachePoolStatus, powerSupplyConnectionEntry=powerSupplyConnectionEntry, agentSystemGlobalStatus=agentSystemGlobalStatus, controllerEntry=controllerEntry, fanConnectionFanName=fanConnectionFanName, fanConnectionEnclosureName=fanConnectionEnclosureName, channelDataRate=channelDataRate, descriptionEvent=descriptionEvent, alertEnclosureFailure=alertEnclosureFailure, arrayDiskEnclosureConnectionControllerName=arrayDiskEnclosureConnectionControllerName, alertEEMIArrayDiskWarning=alertEEMIArrayDiskWarning, arrayDiskLogicalConnectionDiskNumber=arrayDiskLogicalConnectionDiskNumber, alertEEMIEnclosureWarning=alertEEMIEnclosureWarning, alertStorageManagementNonRecoverable=alertStorageManagementNonRecoverable, alertBatteryWarning=alertBatteryWarning, alertEEMIVirtualDiskNonRecoverable=alertEEMIVirtualDiskNonRecoverable, enclosureSerialNumber=enclosureSerialNumber, controllerC1Port1WWN=controllerC1Port1WWN, batteryEntry=batteryEntry, alertEEMIArrayDiskFailure=alertEEMIArrayDiskFailure, batteryNexusID=batteryNexusID, agentSmartThermalShutdown=agentSmartThermalShutdown, agentManagementSoftwareURLName=agentManagementSoftwareURLName, virtualDiskPIEnable=virtualDiskPIEnable, alertEEMIFluidCacheDiskInformation=alertEEMIFluidCacheDiskInformation, fanRevision=fanRevision, alertRedundancyLost=alertRedundancyLost, arrayDiskEnclosureConnectionEntry=arrayDiskEnclosureConnectionEntry, virtualDiskPartitionTable=virtualDiskPartitionTable, virtualDiskPartitionEntry=virtualDiskPartitionEntry, messageIDEvent=messageIDEvent, enclosureManagementModuleConnectionNumber=enclosureManagementModuleConnectionNumber, fluidCacheDiskNexus=fluidCacheDiskNexus, batteryConnectionTable=batteryConnectionTable, fluidCacheLicenseGeneration=fluidCacheLicenseGeneration, arrayDiskUsedSpaceInMB=arrayDiskUsedSpaceInMB, temperatureProbeNexusID=temperatureProbeNexusID, controllerNumber=controllerNumber, fluidCachePoolHighAvailabilityState=fluidCachePoolHighAvailabilityState, arrayDiskSmartAlertIndication=arrayDiskSmartAlertIndication, enclosureManagementModuleRevision=enclosureManagementModuleRevision, agentSnmpVersion=agentSnmpVersion, arrayDiskSectorSize=arrayDiskSectorSize, controllerC1BatteryState=controllerC1BatteryState, alertChannelNormal=alertChannelNormal, 
controllerFWVersion=controllerFWVersion, controllerName=controllerName, temperatureProbeName=temperatureProbeName, alertEEMIfluidCachePoolWarning=alertEEMIfluidCachePoolWarning, alertControllerFailure=alertControllerFailure, fluidCacheLicenseValidity=fluidCacheLicenseValidity, tapeDriveSASAddress=tapeDriveSASAddress, controllerC1Channel=controllerC1Channel, powerSupply1PartNumber=powerSupply1PartNumber, controllerRollUpStatus=controllerRollUpStatus, fluidCachePoolNexus=fluidCachePoolNexus, arrayDiskSeverity=arrayDiskSeverity, controllerConfiguredDrivesSpinUpTimeInterval=controllerConfiguredDrivesSpinUpTimeInterval, batteryConnectionBatteryNumber=batteryConnectionBatteryNumber, arrayDiskPowerState=arrayDiskPowerState, batteryConnectionControllerNumber=batteryConnectionControllerNumber, alertEnclosureNormal=alertEnclosureNormal, previousStatusEvent=previousStatusEvent, fanConnectionFanNumber=fanConnectionFanNumber, agentGetTimeout=agentGetTimeout, controllerCheckConsistencyRate=controllerCheckConsistencyRate, alertStorageManagementInformation=alertStorageManagementInformation, enclosureSASAddress=enclosureSASAddress, alertEEMIArrayDiskInformation=alertEEMIArrayDiskInformation, alertChannelInformation=alertChannelInformation, agentVersion=agentVersion, fluidCacheDiskState=fluidCacheDiskState, alertEEMIFluidCachePoolInformation=alertEEMIFluidCachePoolInformation, batteryConnectionControllerName=batteryConnectionControllerName, channelNumber=channelNumber, alertFluidCacheDisklFailure=alertFluidCacheDisklFailure, alertEEMIBatteryInformation=alertEEMIBatteryInformation, alertBatteryNonRecoverable=alertBatteryNonRecoverable, enclosureTable=enclosureTable, arrayDiskAltaVendorId=arrayDiskAltaVendorId)
mibBuilder.exportSymbols("StorageManagement-MIB", powerSupplyName=powerSupplyName, enclosureKernelVersion=enclosureKernelVersion, fluidCachePoolNumber=fluidCachePoolNumber, fluidCachePoolUUID=fluidCachePoolUUID, fanSeverity=fanSeverity, channelComponentStatus=channelComponentStatus, virtualDiskComponentStatus=virtualDiskComponentStatus, temperatureProbeMaxCritical=temperatureProbeMaxCritical, alertFanInformation=alertFanInformation, alertEEMIEMMWarning=alertEEMIEMMWarning, alertVirtualDiskFailure=alertVirtualDiskFailure, fanProbeMaxCritical=fanProbeMaxCritical, fanConnectionEnclosureNumber=fanConnectionEnclosureNumber, fluidCacheLicenseFeatureDescription=fluidCacheLicenseFeatureDescription, controllerStorportDriverVersion=controllerStorportDriverVersion, storageManagementInfo=storageManagementInfo, softwareManufacturer=softwareManufacturer, powerSupplyConnectionEnclosureNumber=powerSupplyConnectionEnclosureNumber, alertEEMIFanNonRecoverable=alertEEMIFanNonRecoverable, virtualDiskCachePolicy=virtualDiskCachePolicy, alertBatteryInformation=alertBatteryInformation, fluidCacheDiskNumber=fluidCacheDiskNumber, temperatureProbeComponentStatus=temperatureProbeComponentStatus, powerSupplyConnectionEnclosureName=powerSupplyConnectionEnclosureName, enclosureServiceTag=enclosureServiceTag, arrayDiskLogicalConnectionDiskName=arrayDiskLogicalConnectionDiskName, batteryConnectionNumber=batteryConnectionNumber, dell=dell, fanProbeMinCritical=fanProbeMinCritical, enclosureManagementModuleRollUpStatus=enclosureManagementModuleRollUpStatus, fluidCacheName=fluidCacheName, alertVirtualDiskPartitionFailure=alertVirtualDiskPartitionFailure, powerSupply2PartNumber=powerSupply2PartNumber, virtualDiskLengthInBytes=virtualDiskLengthInBytes, alertArrayDiskInformation=alertArrayDiskInformation, alertEnclosureWarning=alertEnclosureWarning, controllerPartnerStatus=controllerPartnerStatus, enclosureRollUpStatus=enclosureRollUpStatus, agentMibVersion=agentMibVersion, arrayDiskComponentStatus=arrayDiskComponentStatus, alertPowerSupplyInformation=alertPowerSupplyInformation, enclosureManagementModuleState=enclosureManagementModuleState, alertControllerNonRecoverable=alertControllerNonRecoverable, batteryComponentStatus=batteryComponentStatus, alertEEMIEnclosureNormal=alertEEMIEnclosureNormal, batteryState=batteryState, arrayDiskLogicalConnectionVirtualDiskNumber=arrayDiskLogicalConnectionVirtualDiskNumber, alertEEMIRedundancyNormal=alertEEMIRedundancyNormal, temperatureConnectionEnclosureName=temperatureConnectionEnclosureName, arrayDiskChannelConnectionArrayDiskName=arrayDiskChannelConnectionArrayDiskName, fluidCacheDiskBackendDeviceName=fluidCacheDiskBackendDeviceName, enclosureNexusID=enclosureNexusID, alertEEMIStorageManagementFailure=alertEEMIStorageManagementFailure, controllerC0Port0WWN=controllerC0Port0WWN, temperatureProbeUnit=temperatureProbeUnit, controllerC1Target=controllerC1Target, enclosureExpressServiceCode=enclosureExpressServiceCode, alertEEMIVirtualDiskInformation=alertEEMIVirtualDiskInformation, controllerC1OSController=controllerC1OSController, enclosureEntry=enclosureEntry, alertChannelWarning=alertChannelWarning, logicalDevices=logicalDevices, controllerClusterMode=controllerClusterMode, arrayDiskSerialNo=arrayDiskSerialNo, currentStatusEvent=currentStatusEvent, batteryConnectionBatteryName=batteryConnectionBatteryName, batteryTable=batteryTable, controllerSCSIInitiatorID=controllerSCSIInitiatorID, alertEEMIFanFailure=alertEEMIFanFailure, arrayDiskUsedSpaceInBytes=arrayDiskUsedSpaceInBytes, 
channelRollUpStatus=channelRollUpStatus, enclosureManagementModuleConnectionEnclosureNumber=enclosureManagementModuleConnectionEnclosureNumber, arrayDiskSASAddress=arrayDiskSASAddress, enclosureComponentStatus=enclosureComponentStatus, batteryConnectionEntry=batteryConnectionEntry, virtualDiskAssociatedFluidCacheStatus=virtualDiskAssociatedFluidCacheStatus, alertEEMIVirtualDiskWarning=alertEEMIVirtualDiskWarning, alertEEMITemperatureProbeInformation=alertEEMITemperatureProbeInformation, alertEEMIVirtualDiskPartitionInformation=alertEEMIVirtualDiskPartitionInformation, controllerConfiguredDrivesSpinUpTime=controllerConfiguredDrivesSpinUpTime, controllerMemorySizeInBytes=controllerMemorySizeInBytes, fluidCacheLicenseDuration=fluidCacheLicenseDuration, alertStorageManagementWarning=alertStorageManagementWarning, controllerDriverVersion=controllerDriverVersion, alertVirtualDiskInformation=alertVirtualDiskInformation, controllerReconstructRate=controllerReconstructRate, virtualDiskTable=virtualDiskTable, virtualDiskLengthInMB=virtualDiskLengthInMB, virtualDiskEncrypted=virtualDiskEncrypted, alertEEMIFluidCacheInformation=alertEEMIFluidCacheInformation, powerSupplyNexusID=powerSupplyNexusID, powerSupplyRevision=powerSupplyRevision, arrayDiskEnclosureConnectionControllerNumber=arrayDiskEnclosureConnectionControllerNumber, fluidCacheLicenseEntitlementID=fluidCacheLicenseEntitlementID, agentGlobalSystemStatus=agentGlobalSystemStatus, arrayDiskNexusID=arrayDiskNexusID, arrayDiskLifeRemaining=arrayDiskLifeRemaining, controllerSpinDownHotSpareDrives=controllerSpinDownHotSpareDrives, controllerPreservedCache=controllerPreservedCache, fluidCachePoolLicenseState=fluidCachePoolLicenseState, channelState=channelState, channelSeverity=channelSeverity, enclosurePartNumber=enclosurePartNumber, alertEEMITemperatureProbeNonRecoverable=alertEEMITemperatureProbeNonRecoverable, alertEEMIFanInformation=alertEEMIFanInformation, arrayDiskMediaType=arrayDiskMediaType, arrayDiskChannelConnectionControllerNumber=arrayDiskChannelConnectionControllerNumber, fanState=fanState, enclosureManagementModuleTable=enclosureManagementModuleTable, arrayDiskChannelConnectionNumber=arrayDiskChannelConnectionNumber, arrayDiskMaxLinkWidth=arrayDiskMaxLinkWidth, alertVirtualDiskNonRecoverable=alertVirtualDiskNonRecoverable, alertFluidCachePoolFailure=alertFluidCachePoolFailure, enclosureManagementModuleNexusID=enclosureManagementModuleNexusID, controllerComponentStatus=controllerComponentStatus, alertEEMIPowerSupplyInformation=alertEEMIPowerSupplyInformation, controllerC0OSController=controllerC0OSController, enclosureManagementModulePartNumber=enclosureManagementModulePartNumber, alertEEMIFluidCachePoolFailure=alertEEMIFluidCachePoolFailure, alertVirtualDiskPartitionInformation=alertVirtualDiskPartitionInformation, alertEEMIRedundancyDegraded=alertEEMIRedundancyDegraded, enclosureManagementModuleSeverity=enclosureManagementModuleSeverity, arrayDiskLunID=arrayDiskLunID, locationEvent=locationEvent, alertEEMIStorageManagementWarning=alertEEMIStorageManagementWarning, alertEEMIControllerInformation=alertEEMIControllerInformation, temperatureProbeMinWarning=temperatureProbeMinWarning, enclosureID=enclosureID, controllerPhysicalDeviceCount=controllerPhysicalDeviceCount, tapeDriveNumber=tapeDriveNumber, virtualDiskDeviceName=virtualDiskDeviceName, alertEEMIVirtualDiskPartitionWarning=alertEEMIVirtualDiskPartitionWarning, arrayDiskChannel=arrayDiskChannel, temperatureProbeNumber=temperatureProbeNumber, 
arrayDiskLogicalConnectionArrayDiskName=arrayDiskLogicalConnectionArrayDiskName, fluidCacheLicenseProductId=fluidCacheLicenseProductId, alertEEMIArrayDiskNormal=alertEEMIArrayDiskNormal, temperatureConnectionNumber=temperatureConnectionNumber, fluidCacheTable=fluidCacheTable, virtualDiskReadPolicy=virtualDiskReadPolicy, controllerAutomaticPowerSaving=controllerAutomaticPowerSaving, arrayDiskLogicalConnectionArrayDiskNumber=arrayDiskLogicalConnectionArrayDiskNumber, arrayDiskModelNumber=arrayDiskModelNumber, alertEEMIBatteryNonRecoverable=alertEEMIBatteryNonRecoverable, temperatureProbeVendor=temperatureProbeVendor, controllerMemorySizeInMB=controllerMemorySizeInMB, channelSCSIID=channelSCSIID, virtualDiskEntry=virtualDiskEntry, alertEEMIEMMFailure=alertEEMIEMMFailure, enclosureSCSIRate=enclosureSCSIRate, controllerNodeWWN=controllerNodeWWN, fanConnectionNumber=fanConnectionNumber, enclosureManagementModuleConnectionEMMNumber=enclosureManagementModuleConnectionEMMNumber, controllerEncryptionMode=controllerEncryptionMode, fluidCacheLicenseDateSold=fluidCacheLicenseDateSold, arrayDiskChannelConnectionArrayDiskNumber=arrayDiskChannelConnectionArrayDiskNumber, channelBusType=channelBusType, batteryVendor=batteryVendor, enclosureSeverity=enclosureSeverity, fluidCachePoolTable=fluidCachePoolTable, fluidCacheNumber=fluidCacheNumber, arrayDiskManufactureWeek=arrayDiskManufactureWeek, arrayDiskState=arrayDiskState, enclosureConfig=enclosureConfig, enclosureManagementModuleMaxSpeed=enclosureManagementModuleMaxSpeed, enhancedMessageIDEvent=enhancedMessageIDEvent, alertEMMInformation=alertEMMInformation, enclosureProcessorVersion=enclosureProcessorVersion, controllerPCISlot=controllerPCISlot, batteryName=batteryName, tapeDriveNexusID=tapeDriveNexusID, alertEEMIVirtualDiskNormal=alertEEMIVirtualDiskNormal, fanEntry=fanEntry, powerSupplyConnectionPowersupplyName=powerSupplyConnectionPowersupplyName, controllerPatrolReadRate=controllerPatrolReadRate, batteryLearnState=batteryLearnState, softwareDescription=softwareDescription, fan1PartNumber=fan1PartNumber, enclosureManagementModuleConnectionTable=enclosureManagementModuleConnectionTable, alertEEMIPowerSupplyNonRecoverable=alertEEMIPowerSupplyNonRecoverable, alertEEMIVirtualDiskPartitionFailure=alertEEMIVirtualDiskPartitionFailure, arrayDiskAltaRevisionId=arrayDiskAltaRevisionId, powerSupplyEntry=powerSupplyEntry, controllerHostPortCount=controllerHostPortCount, controllerC0Port0Name=controllerC0Port0Name, temperatureProbeRollUpStatus=temperatureProbeRollUpStatus, alertControllerNormal=alertControllerNormal, enclosureSCSIID=enclosureSCSIID, arrayDiskRemainingRatedWriteEndurance=arrayDiskRemainingRatedWriteEndurance, controllerC1Port0WWN=controllerC1Port0WWN, fanProbeMinWarning=fanProbeMinWarning, alertEEMIControllerNormal=alertEEMIControllerNormal, agentSoftwareStatus=agentSoftwareStatus, alertfluidCacheWarning=alertfluidCacheWarning, enclosureFirmwareVersion=enclosureFirmwareVersion, alertEEMIArrayDiskNonRecoverable=alertEEMIArrayDiskNonRecoverable, virtualDiskDiskCachePolicy=virtualDiskDiskCachePolicy, alertEEMIControllerFailure=alertEEMIControllerFailure, tapeDriveEntry=tapeDriveEntry, fluidCacheDiskTable=fluidCacheDiskTable, powerSupplyConnectionNumber=powerSupplyConnectionNumber, arrayDiskEncryptionCapable=arrayDiskEncryptionCapable, controllerCacheSizeInBytes=controllerCacheSizeInBytes, powerSupplyRollUpStatus=powerSupplyRollUpStatus, fluidCacheEntry=fluidCacheEntry, agentRefreshRate=agentRefreshRate, storage=storage, 
batteryRollUpStatus=batteryRollUpStatus, alertArrayDiskWarning=alertArrayDiskWarning, agentIPAddress=agentIPAddress, agentLastGlobalSystemStatus=agentLastGlobalSystemStatus, alertEEMIChannelWarning=alertEEMIChannelWarning, controllerSpinDownConfiguredDrives=controllerSpinDownConfiguredDrives, fanRollUpStatus=fanRollUpStatus, controllerRebuildRateInPercent=controllerRebuildRateInPercent, tapeDriveProductID=tapeDriveProductID, alertEEMIStorageManagementNonRecoverable=alertEEMIStorageManagementNonRecoverable, arrayDiskCapableSpeed=arrayDiskCapableSpeed, tapeDriveTable=tapeDriveTable, arrayDiskEntry=arrayDiskEntry, alertChannelNonRecoverable=alertChannelNonRecoverable, enclosureESM1PartNumber=enclosureESM1PartNumber, controllerPatrolReadMode=controllerPatrolReadMode, alertStorageManagementFailure=alertStorageManagementFailure, enclosureManagementModuleConnectionEntry=enclosureManagementModuleConnectionEntry, fluidCachePoolEntry=fluidCachePoolEntry, alertEEMITemperatureProbeFailure=alertEEMITemperatureProbeFailure, virtualDiskPartitionFluidCacheStatus=virtualDiskPartitionFluidCacheStatus, fanNexusID=fanNexusID, alertEEMIfluidCacheWarning=alertEEMIfluidCacheWarning, virtualDiskName=virtualDiskName, softwareVersion=softwareVersion, arrayDiskChannelConnectionControllerName=arrayDiskChannelConnectionControllerName, temperatureProbeEntry=temperatureProbeEntry, alertEEMIBatteryFailure=alertEEMIBatteryFailure, controllerC0Channel=controllerC0Channel, fanProbeMaxWarning=fanProbeMaxWarning, alertFanFailure=alertFanFailure, enclosureManagementModuleFWVersion=enclosureManagementModuleFWVersion, arrayDiskChannelConnectionChannelName=arrayDiskChannelConnectionChannelName, arrayDiskNumber=arrayDiskNumber, chassisServiceTagEvent=chassisServiceTagEvent, arrayDiskTargetID=arrayDiskTargetID, virtualDiskFreeSpaceInBytes=virtualDiskFreeSpaceInBytes, enclosureVendor=enclosureVendor, arrayDiskChannelConnectionEntry=arrayDiskChannelConnectionEntry, arrayDiskDriverVersion=arrayDiskDriverVersion, fluidCacheDiskStatus=fluidCacheDiskStatus, arrayDiskName=arrayDiskName, alertEMMEMMInformation=alertEMMEMMInformation, alertEnclosureNonRecoverable=alertEnclosureNonRecoverable, temperatureProbeMinCritical=temperatureProbeMinCritical, channelNexusID=channelNexusID, powerSupplyVendor=powerSupplyVendor)
mibBuilder.exportSymbols("StorageManagement-MIB", fluidCacheDiskConfiguredMode=fluidCacheDiskConfiguredMode, powerSupplyState=powerSupplyState, agentLastGlobalStatus=agentLastGlobalStatus, alertEEMIVirtualDiskFailure=alertEEMIVirtualDiskFailure, enclosureESM2PartNumber=enclosureESM2PartNumber, alertEEMIControllerWarning=alertEEMIControllerWarning, controllerMinRequiredStorportVer=controllerMinRequiredStorportVer, objectNameEvent=objectNameEvent, arrayDiskChannelConnectionTable=arrayDiskChannelConnectionTable, alertPowerSupplyWarning=alertPowerSupplyWarning, virtualDiskArrayDiskType=virtualDiskArrayDiskType, serviceTagEvent=serviceTagEvent, arrayDiskReadOnly=arrayDiskReadOnly, alertArrayDiskFailure=alertArrayDiskFailure, arrayDiskEnclosureConnectionEnclosureNumber=arrayDiskEnclosureConnectionEnclosureNumber, controllerMinDriverVersion=controllerMinDriverVersion, fanConnectionTable=fanConnectionTable, enclosureTotalSlots=enclosureTotalSlots, fluidCacheLicenseFeatureID=fluidCacheLicenseFeatureID, arrayDiskEncrypted=arrayDiskEncrypted, virtualDiskWritePolicy=virtualDiskWritePolicy, virtualDiskChannel=virtualDiskChannel, description=description, globalStatus=globalStatus, arrayDiskPartNumber=arrayDiskPartNumber, tapeDriveMediaType=tapeDriveMediaType, fanProbeUnit=fanProbeUnit, alertEnclosureInformation=alertEnclosureInformation, alertFluidCacheFailure=alertFluidCacheFailure, alertEEMIfluidCacheDiskWarning=alertEEMIfluidCacheDiskWarning, arrayDiskEnclosureConnectionNumber=arrayDiskEnclosureConnectionNumber, controllerCacheSizeInMB=controllerCacheSizeInMB, controllerPIEnable=controllerPIEnable, alertFanNonRecoverable=alertFanNonRecoverable, arrayDiskEnclosureConnectionArrayDiskNumber=arrayDiskEnclosureConnectionArrayDiskNumber, alertArrayDiskNonRecoverable=alertArrayDiskNonRecoverable, fanNumber=fanNumber, controllerSpinDownUnconfiguredDrives=controllerSpinDownUnconfiguredDrives, agentModifiers=agentModifiers, enclosureManagementModuleComponentStatus=enclosureManagementModuleComponentStatus, arrayDiskLargestContiguousFreeSpaceInMB=arrayDiskLargestContiguousFreeSpaceInMB, storageManagement=storageManagement, virtualDiskCurStripeSizeInBytes=virtualDiskCurStripeSizeInBytes, alertEEMIEnclosureInformation=alertEEMIEnclosureInformation, alertEMMWarning=alertEMMWarning, enclosureManagementModuleEntry=enclosureManagementModuleEntry, arrayDiskLogicalConnectionVirtualDiskName=arrayDiskLogicalConnectionVirtualDiskName, fluidCacheLicenseCapacity=fluidCacheLicenseCapacity, arrayDiskEnclosureID=arrayDiskEnclosureID, arrayDiskLogicalConnectionTable=arrayDiskLogicalConnectionTable, virtualDiskBadBlocksDetected=virtualDiskBadBlocksDetected, controllerMinFWVersion=controllerMinFWVersion, agentTimeStamp=agentTimeStamp, arrayDiskDriveWriteCache=arrayDiskDriveWriteCache, arrayDiskEnclosureConnectionEnclosureName=arrayDiskEnclosureConnectionEnclosureName, alertEEMIBatteryWarning=alertEEMIBatteryWarning, virtualDiskSeverity=virtualDiskSeverity, arrayDiskAltaProductId=arrayDiskAltaProductId, alertControllerInformation=alertControllerInformation, virtualDiskTargetID=virtualDiskTargetID, arrayDiskDeviceLifeStatus=arrayDiskDeviceLifeStatus, alertChannelFailure=alertChannelFailure, arrayDiskManufactureYear=arrayDiskManufactureYear, controllerBatteryChargeCount=controllerBatteryChargeCount, controllerPatrolReadState=controllerPatrolReadState, fluidCacheDiskEntry=fluidCacheDiskEntry, powerSupplyNumber=powerSupplyNumber, arrayDiskSpareState=arrayDiskSpareState, 
temperatureConnectionTemperatureName=temperatureConnectionTemperatureName, alertEEMIChannelFailure=alertEEMIChannelFailure, fan2PartNumber=fan2PartNumber, controllerC0Port1WWN=controllerC0Port1WWN, arrayDiskProductID=arrayDiskProductID, alertEEMIBatteryNormal=alertEEMIBatteryNormal, arrayDiskLargestContiguousFreeSpaceInBytes=arrayDiskLargestContiguousFreeSpaceInBytes, controllerFaultTolerant=controllerFaultTolerant, enclosureManagementModuleType=enclosureManagementModuleType, enclosureEmptySlotCount=enclosureEmptySlotCount, enclosureAssetTag=enclosureAssetTag, alertEEMIStorageManagementInformation=alertEEMIStorageManagementInformation, arrayDiskPICapable=arrayDiskPICapable, alertArrayDiskNormal=alertArrayDiskNormal, alertTemperatureProbeInformation=alertTemperatureProbeInformation, enclosureChannelNumber=enclosureChannelNumber, controllerC1Port0Name=controllerC1Port0Name)
| 186.466055
| 12,885
| 0.788931
|
ff81588e22ef7aae1d9d90c995f2630aee117891
| 114
|
py
|
Python
|
tests/version_test.py
|
plumerai/zoo
|
55e8ce9a42fb8806503e16fc2340f0fd27948d09
|
[
"Apache-2.0"
] | 89
|
2019-07-12T17:40:55.000Z
|
2022-03-27T03:08:31.000Z
|
tests/version_test.py
|
plumerai/zoo
|
55e8ce9a42fb8806503e16fc2340f0fd27948d09
|
[
"Apache-2.0"
] | 99
|
2019-07-12T17:54:04.000Z
|
2022-03-17T23:01:43.000Z
|
tests/version_test.py
|
plumerai/zoo
|
55e8ce9a42fb8806503e16fc2340f0fd27948d09
|
[
"Apache-2.0"
] | 17
|
2019-08-30T08:38:57.000Z
|
2022-02-08T08:41:38.000Z
|
import larq_zoo
def test_version():
assert hasattr(larq_zoo, "__version__") and "." in larq_zoo.__version__
| 19
| 75
| 0.745614
|
1d8118b4f91f7018bd524a811d16e7cf6d3e957a
| 655
|
py
|
Python
|
geovoronoi/__init__.py
|
mjziebarth/geovoronoi
|
40c65aefa1c754975c41bda57279e289c6b04222
|
[
"Apache-2.0"
] | null | null | null |
geovoronoi/__init__.py
|
mjziebarth/geovoronoi
|
40c65aefa1c754975c41bda57279e289c6b04222
|
[
"Apache-2.0"
] | null | null | null |
geovoronoi/__init__.py
|
mjziebarth/geovoronoi
|
40c65aefa1c754975c41bda57279e289c6b04222
|
[
"Apache-2.0"
] | null | null | null |
"""
geovoronoi – main module
Imports all necessary functions to calculate Voronoi regions from a set of coordinates on a geographic shape.
Additionally imports some helper functions.
Author: Markus Konrad <markus.konrad@wzb.eu>
"""
from ._voronoi import (coords_to_points, points_to_coords, voronoi_regions_from_coords, polygon_lines_from_voronoi,
polygon_shapes_from_voronoi_lines, assign_points_to_voronoi_polygons,
get_points_to_poly_assignments)
from ._geom import calculate_polygon_areas
__title__ = 'geovoronoi'
__version__ = '0.1.2'
__author__ = 'Markus Konrad'
__license__ = 'Apache License 2.0'
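# Usage sketch (illustrative; the calls below are assumptions based only on
# the names imported above, not on the package documentation):
#   pts = coords_to_points(coords)                        # NumPy coords -> shapely Points
#   regions = voronoi_regions_from_coords(coords, shape)  # Voronoi cells clipped to `shape`
#   areas = calculate_polygon_areas(regions)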
| 31.190476
| 115
| 0.766412
|
75c8c117e3e3da22609c5e9e2ec9f22e91dfadf9
| 17
|
py
|
Python
|
tests/unit/test_modulegraph/testpkg-relimport/pkg/sub2/__init__.py
|
hawkhai/pyinstaller
|
016a24479b34de161792c72dde455a81ad4c78ae
|
[
"Apache-2.0"
] | 9,267
|
2015-01-01T04:08:45.000Z
|
2022-03-31T11:42:38.000Z
|
tests/unit/test_modulegraph/testpkg-relimport/pkg/sub2/__init__.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 5,150
|
2015-01-01T12:09:56.000Z
|
2022-03-31T18:06:12.000Z
|
tests/unit/test_modulegraph/testpkg-relimport/pkg/sub2/__init__.py
|
jeremysanders/pyinstaller
|
321b24f9a9a5978337735816b36ca6b4a90a2fb4
|
[
"Apache-2.0"
] | 2,101
|
2015-01-03T10:25:27.000Z
|
2022-03-30T11:04:42.000Z
|
""" pkg.sub2 """
| 8.5
| 16
| 0.411765
|
2da74eb26a0d832e031e4c30ed6e0a68731e4ab2
| 20,837
|
py
|
Python
|
ocrd_anybaseocr/cli/ocrd_anybaseocr_cropping.py
|
wrznr/OCR-D-LAYoutERkennung
|
e94ee3b462e21f66b79e6cbebd959c03d631c52b
|
[
"Apache-2.0"
] | null | null | null |
ocrd_anybaseocr/cli/ocrd_anybaseocr_cropping.py
|
wrznr/OCR-D-LAYoutERkennung
|
e94ee3b462e21f66b79e6cbebd959c03d631c52b
|
[
"Apache-2.0"
] | null | null | null |
ocrd_anybaseocr/cli/ocrd_anybaseocr_cropping.py
|
wrznr/OCR-D-LAYoutERkennung
|
e94ee3b462e21f66b79e6cbebd959c03d631c52b
|
[
"Apache-2.0"
] | null | null | null |
# ======================================================================
# ====================================
# README file for Page Cropping component
# ====================================
# Filename : ocrd-anyBaseOCR-pagecropping.py
# Author: Syed Saqib Bukhari, Mohammad Mohsin Reza, Md. Ajraf Rakib
# Responsible: Syed Saqib Bukhari, Mohammad Mohsin Reza, Md. Ajraf Rakib
# Contact Email: Saqib.Bukhari@dfki.de, Mohammad_mohsin.reza@dfki.de, Md_ajraf.rakib@dfki.de
# Note:
# 1) this work has been done in DFKI, Kaiserslautern, Germany.
# 2) The parameters values are read from ocrd-anyBaseOCR-parameter.json file. The values can be changed in that file.
# 3) The command line IO usage is based on "OCR-D" project guidelines (https://ocr-d.github.io/). A sample image file (samples/becker_quaestio_1586_00013.tif) and mets.xml (mets.xml) are provided. The sequence of operations is: binarization, deskewing, cropping and dewarping (or can also be: binarization, dewarping, deskewing, and cropping; depends upon use-case).
# *********** Method Behaviour ********************
# This function takes a document image as input and crops/selects the page content
# area only (that is, it removes textual noise as well as any other noise around the page content area)
# *********** Method Behaviour ********************
# *********** LICENSE ********************
# Copyright 2018 Syed Saqib Bukhari, Mohammad Mohsin Reza, Md. Ajraf Rakib
# Apache License 2.0
# A permissive license whose main conditions require preservation of copyright
# and license notices. Contributors provide an express grant of patent rights.
# Licensed works, modifications, and larger works may be distributed under
# different terms and without source code.
# *********** LICENSE ********************
# ======================================================================
import os
import numpy as np
from pylsd.lsd import lsd
import ocrolib
import cv2
from PIL import Image
from ..constants import OCRD_TOOL
from ocrd import Processor
from ocrd_utils import getLogger, concat_padded, MIMETYPE_PAGE
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import (
CoordsType,
to_xml,
MetadataItemType,
LabelsType, LabelType
)
from ocrd_models.ocrd_page_generateds import BorderType
TOOL = 'ocrd-anybaseocr-crop'
LOG = getLogger('OcrdAnybaseocrCropper')
FALLBACK_IMAGE_GRP = 'OCR-D-IMG-CROP'
class OcrdAnybaseocrCropper(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super(OcrdAnybaseocrCropper, self).__init__(*args, **kwargs)
def write_crop_coordinate(self, base, coordinate):
x1, y1, x2, y2 = coordinate
with open(base + '-frame-pf.dat', 'w') as fp:
fp.write(str(x1)+"\t"+str(y1)+"\t"+str(x2-x1)+"\t"+str(y2-y1))
def rotate_image(self, orientation, image):
return image.rotate(orientation)
def remove_rular(self, arg):
#base = arg.split(".")[0]
#img = cv2.cvtColor(arg, cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(arg, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(
gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
height, width, _ = arg.shape
imgArea = height*width
# Get bounding box x,y,w,h of each contours
rects = [cv2.boundingRect(cnt) for cnt in contours]
rects = sorted(rects, key=lambda x: (x[2]*x[3]), reverse=True)
        # keep only rectangles whose area lies between the configured minRularArea and maxRularArea fractions of the image area
rects = [r for r in rects if (
imgArea*self.parameter['maxRularArea']) > (r[2]*r[3]) > (imgArea*self.parameter['minRularArea'])]
        # detect child rectangles; these are usually not the ruler, which normally sits along one side of the page.
removeRect = []
for i, rect1 in enumerate(rects):
(x1, y1, w1, h1) = rect1
for rect2 in rects[i+1:len(rects)]:
(x2, y2, w2, h2) = rect2
if (x1 < x2) and (y1 < y2) and (x1+w1 > x2+w2) and (y1+h1 > y2+h2):
removeRect.append(rect2)
        # remove child rectangles.
rects = [x for x in rects if x not in removeRect]
predictRular = []
for rect in rects:
(x, y, w, h) = rect
if (w < width*self.parameter['rularWidth']) and ((y > height*self.parameter['positionBelow']) or ((x+w) < width*self.parameter['positionLeft']) or (x > width*self.parameter['positionRight'])):
if (self.parameter['rularRatioMin'] < round(float(w)/float(h), 2) < self.parameter['rularRatioMax']) or (self.parameter['rularRatioMin'] < round(float(h)/float(w), 2) < self.parameter['rularRatioMax']):
blackPixel = np.count_nonzero(arg[y:y+h, x:x+w] == 0)
predictRular.append((x, y, w, h, blackPixel))
        # Finally, check the number of black pixels to avoid false ruler detection
if predictRular:
predictRular = sorted(
predictRular, key=lambda x: (x[4]), reverse=True)
x, y, w, h, _ = predictRular[0]
cv2.rectangle(arg, (x-15, y-15), (x+w+20, y+h+20),
(255, 255, 255), cv2.FILLED)
return arg
def BorderLine(self, MaxBoundary, lines, index, flag, lineDetectH, lineDetectV):
getLine = 1
LastLine = []
if flag in ('top', 'left'):
for i in range(len(lines)-1):
if(abs(lines[i][index]-lines[i+1][index])) <= 15 and lines[i][index] < MaxBoundary:
LastLine = [lines[i][0], lines[i]
[1], lines[i][2], lines[i][3]]
getLine += 1
elif getLine >= 3:
break
else:
getLine = 1
elif flag in ('bottom', 'right'):
for i in reversed(list(range(len(lines)-1))):
if(abs(lines[i][index]-lines[i+1][index])) <= 15 and lines[i][index] > MaxBoundary:
LastLine = [lines[i][0], lines[i]
[1], lines[i][2], lines[i][3]]
getLine += 1
elif getLine >= 3:
break
else:
getLine = 1
if getLine >= 3 and LastLine:
if flag == "top":
lineDetectH.append((
LastLine[0], max(LastLine[1], LastLine[3]),
LastLine[2], max(LastLine[1], LastLine[3])
))
if flag == "left":
lineDetectV.append((
max(LastLine[0], LastLine[2]), LastLine[1],
max(LastLine[0], LastLine[2]), LastLine[3]
))
if flag == "bottom":
lineDetectH.append((
LastLine[0], min(LastLine[1], LastLine[3]),
LastLine[2], min(LastLine[1], LastLine[3])
))
if flag == "right":
lineDetectV.append((
min(LastLine[0], LastLine[2]), LastLine[1],
min(LastLine[0], LastLine[2]), LastLine[3]
))
def get_intersect(self, a1, a2, b1, b2):
s = np.vstack([a1, a2, b1, b2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2)
if z == 0:
# return (float('inf'), float('inf'))
return (0, 0)
return (x/z, y/z)
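    # Worked example (illustrative): the horizontal segment (0,0)-(2,0) and the
    # vertical segment (1,-1)-(1,1) intersect at (1.0, 0.0):
    #   self.get_intersect((0, 0), (2, 0), (1, -1), (1, 1))  ->  (1.0, 0.0)
    # Parallel lines give z == 0, for which (0, 0) is returned as a sentinel.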
def detect_lines(self, arg):
Hline = []
Vline = []
gray = cv2.cvtColor(arg, cv2.COLOR_RGB2GRAY)
imgHeight, imgWidth, _ = arg.shape
lines = lsd(gray)
for i in range(lines.shape[0]):
pt1 = (int(lines[i, 0]), int(lines[i, 1]))
pt2 = (int(lines[i, 2]), int(lines[i, 3]))
            # consider only lines whose length exceeds this arbitrary threshold
if (abs(pt1[0]-pt2[0]) > 45) and ((int(pt1[1]) < imgHeight*0.25) or (int(pt1[1]) > imgHeight*0.75)):
# make full horizontal line
Hline.append([0, int(pt1[1]), imgWidth, int(pt2[1])])
if (abs(pt1[1]-pt2[1]) > 45) and ((int(pt1[0]) < imgWidth*0.4) or (int(pt1[0]) > imgWidth*0.6)):
# make full vertical line
Vline.append([int(pt1[0]), 0, int(pt2[0]), imgHeight])
Hline.sort(key=lambda x: (x[1]), reverse=False)
Vline.sort(key=lambda x: (x[0]), reverse=False)
return imgHeight, imgWidth, Hline, Vline
def select_borderLine(self, arg, lineDetectH, lineDetectV):
imgHeight, imgWidth, Hlines, Vlines = self.detect_lines(arg)
# top side
self.BorderLine(imgHeight*0.25, Hlines, 1,
"top", lineDetectH, lineDetectV)
# left side
self.BorderLine(imgWidth*0.4, Vlines, 0, "left",
lineDetectH, lineDetectV)
# bottom side
self.BorderLine(imgHeight*0.75, Hlines, 1,
"bottom", lineDetectH, lineDetectV)
# right side
self.BorderLine(imgWidth*0.6, Vlines, 0, "right",
lineDetectH, lineDetectV)
intersectPoint = []
for l1 in lineDetectH:
for l2 in lineDetectV:
x, y = self.get_intersect(
(l1[0], l1[1]),
(l1[2], l1[3]),
(l2[0], l2[1]),
(l2[2], l2[3])
)
intersectPoint.append([x, y])
Xstart = 0
Xend = imgWidth
Ystart = 0
Yend = imgHeight
for i in intersectPoint:
Xs = int(i[0])+10 if i[0] < imgWidth*0.4 else 10
if Xs > Xstart:
Xstart = Xs
Xe = int(i[0])-10 if i[0] > imgWidth*0.6 else int(imgWidth)-10
if Xe < Xend:
Xend = Xe
Ys = int(i[1])+10 if i[1] < imgHeight*0.25 else 10
# print("Ys,Ystart:",Ys,Ystart)
if Ys > Ystart:
Ystart = Ys
Ye = int(i[1])-15 if i[1] > imgHeight*0.75 else int(imgHeight)-15
if Ye < Yend:
Yend = Ye
if Xend < 0:
Xend = 10
if Yend < 0:
Yend = 15
#self.save_pf(base, [Xstart, Ystart, Xend, Yend])
return [Xstart, Ystart, Xend, Yend]
def filter_noisebox(self, textarea, height, width):
tmp = []
st = True
while st:
textarea = [list(x) for x in textarea if x not in tmp]
if len(textarea) > 1:
tmp = []
textarea = sorted(
textarea, key=lambda x: (x[3]), reverse=False)
# print textarea
x11, y11, x12, y12 = textarea[0]
x21, y21, x22, y22 = textarea[1]
if abs(y12-y21) > 100 and (float(abs(x12-x11)*abs(y12-y11))/(height*width)) < 0.001:
tmp.append(textarea[0])
x11, y11, x12, y12 = textarea[-2]
x21, y21, x22, y22 = textarea[-1]
if abs(y12-y21) > 100 and (float(abs(x21-x22)*abs(y22-y21))/(height*width)) < 0.001:
tmp.append(textarea[-1])
if len(tmp) == 0:
st = False
else:
break
return textarea
def detect_textarea(self, arg):
textarea = []
small = cv2.cvtColor(arg, cv2.COLOR_RGB2GRAY)
height, width, _ = arg.shape
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(
grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(
cv2.MORPH_RECT, (10, 1)) # for historical docs
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
contours, _ = cv2.findContours(
connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(bw.shape, dtype=np.uint8)
for idx in range(len(contours)):
x, y, w, h = cv2.boundingRect(contours[idx])
# print x,y,w,h
mask[y:y+h, x:x+w] = 0
cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
r = float(cv2.countNonZero(mask[y:y+h, x:x+w])) / (w * h)
if r > 0.45 and (width*0.9) > w > 15 and (height*0.5) > h > 15:
textarea.append([x, y, x+w-1, y+h-1])
cv2.rectangle(arg, (x, y), (x+w-1, y+h-1), (0, 0, 255), 2)
if len(textarea) > 1:
textarea = self.filter_noisebox(textarea, height, width)
return textarea, arg, height, width
def save_pf(self, base, textarea):
x1, y1, x2, y2 = textarea
img = Image.open(base+'.pf.png')
img2 = img.crop((x1, y1, x2, y2))
img2.save(base + '.pf.png')
self.write_crop_coordinate(base, textarea)
def filter_area(self, textarea, binImg):
height, width, _ = binImg.shape
tmp = []
for area in textarea:
if (height*width*self.parameter['minArea'] < (abs(area[2]-area[0]) * abs(area[3]-area[1]))):
tmp.append(area)
return tmp
def marge_columns(self, textarea):
tmp = []
marge = []
# height, _ = binImg.shape
textarea.sort(key=lambda x: (x[0]))
# print self.parameter['colSeparator']
for i in range(len(textarea)-1):
st = False
x11, y11, x12, y12 = textarea[i]
x21, y21, x22, y22 = textarea[i+1]
if x21-x12 <= self.parameter['colSeparator']:
if len(marge) > 0:
# print "marge ", marge[0]
x31, y31, x32, y32 = marge[0]
marge.pop(0)
else:
x31, y31, x32, y32 = [9999, 9999, 0, 0]
marge.append([min(x11, x21, x31), min(y11, y21, y31),
max(x12, x22, x32), max(y12, y22, y32)])
st = True
else:
tmp.append(textarea[i])
if not st:
tmp.append(textarea[-1])
return tmp+marge
def crop_area(self, textarea, binImg, rgb):
height, width, _ = binImg.shape
textarea = np.unique(textarea, axis=0)
i = 0
tmp = []
areas = []
while i < len(textarea):
textarea = [list(x) for x in textarea if x not in tmp]
tmp = []
if len(textarea) == 0:
break
maxBox = textarea[0]
for chkBox in textarea:
if maxBox != chkBox:
x11, y11, x12, y12 = maxBox
x21, y21, x22, y22 = chkBox
if ((x11 <= x21 <= x12) or (x21 <= x11 <= x22)):
tmp.append(maxBox)
tmp.append(chkBox)
maxBox = [min(x11, x21), min(y11, y21),
max(x12, x22), max(y12, y22)]
if len(tmp) == 0:
tmp.append(maxBox)
x1, y1, x2, y2 = maxBox
areas.append(maxBox)
cv2.rectangle(rgb, (x1, y1), (x2, y2), (255, 0, 0), 2)
i = i+1
textarea = np.unique(areas, axis=0).tolist()
if len(textarea) > 0:
textarea = self.filter_area(textarea, binImg)
if len(textarea) > 1:
textarea = self.marge_columns(textarea)
# print textarea
if len(textarea) > 0:
textarea = sorted(textarea, key=lambda x: (
(x[2]-x[0])*(x[3]-x[1])), reverse=True)
# print textarea
x1, y1, x2, y2 = textarea[0]
x1 = x1-20 if x1 > 20 else 0
x2 = x2+20 if x2 < width-20 else width
y1 = y1-40 if y1 > 40 else 0
y2 = y2+40 if y2 < height-40 else height
#self.save_pf(base, [x1, y1, x2, y2])
return textarea
def process(self):
try:
self.page_grp, self.image_grp = self.output_file_grp.split(',')
except ValueError:
self.page_grp = self.output_file_grp
self.image_grp = FALLBACK_IMAGE_GRP
LOG.info("No output file group for images specified, falling back to '%s'", FALLBACK_IMAGE_GRP)
oplevel = self.parameter['operation_level']
for (n, input_file) in enumerate(self.input_files):
file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)
page_id = input_file.pageId or input_file.ID
LOG.info("INPUT FILE %i / %s", n, page_id)
pcgts = page_from_file(self.workspace.download_file(input_file))
metadata = pcgts.get_Metadata()
metadata.add_MetadataItem(
MetadataItemType(type_="processingStep",
name=self.ocrd_tool['steps'][0],
value=TOOL,
Labels=[LabelsType(#externalRef="parameters",
Label=[LabelType(type_=name,
value=self.parameter[name])
for name in self.parameter.keys()])]))
page = pcgts.get_Page()
print(page.imageFilename)
#page_image, page_xywh, page_image_info = self.workspace.image_from_page(page, page_id)
'''
# Get image orientation
orientation = page.get_orientation()
rotated_image = self.rotate_image(orientation, page_image)
LOG.info("INPUT FILE %s ", input_file.pageId or input_file.ID)
img_array = ocrolib.pil2array(rotated_image)
#Check if image is RGB or not
if len(img_array.shape)==2:
img_array = np.stack((img_array,)*3, axis=-1)
img_array_bin = np.array(
img_array > ocrolib.midrange(img_array), 'i')
lineDetectH = []
lineDetectV = []
img_array_rr = self.remove_rular(img_array)
textarea, img_array_rr_ta, height, width = self.detect_textarea(
img_array_rr)
self.parameter['colSeparator'] = int(
width * self.parameter['colSeparator'])
if len(textarea) > 1:
textarea = self.crop_area(
textarea, img_array_bin, img_array_rr_ta)
if len(textarea) == 0:
min_x, min_y, max_x, max_y = self.select_borderLine(
img_array_rr, lineDetectH, lineDetectV)
else:
min_x, min_y, max_x, max_y = textarea[0]
elif len(textarea) == 1 and (height*width*0.5 < (abs(textarea[0][2]-textarea[0][0]) * abs(textarea[0][3]-textarea[0][1]))):
x1, y1, x2, y2 = textarea[0]
x1 = x1-20 if x1 > 20 else 0
x2 = x2+20 if x2 < width-20 else width
y1 = y1-40 if y1 > 40 else 0
y2 = y2+40 if y2 < height-40 else height
#self.save_pf(base, [x1, y1, x2, y2])
min_x, min_y, max_x, max_y = textarea[0]
else:
min_x, min_y, max_x, max_y = self.select_borderLine(
img_array_rr, lineDetectH, lineDetectV)
brd = BorderType(Coords=CoordsType("%i,%i %i,%i %i,%i %i,%i" % (
min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y)))
pcgts.get_Page().set_Border(brd)
# Use input_file's basename for the new file -
# this way the files retain the same basenames:
file_id = input_file.ID.replace(self.input_file_grp, self.output_file_grp)
if file_id == input_file.ID:
file_id = concat_padded(self.output_file_grp, n)
self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=input_file.pageId,
mimetype=MIMETYPE_PAGE,
local_filename=os.path.join(self.output_file_grp,
file_id + '.xml'),
content=to_xml(pcgts).encode('utf-8')
)
'''
| 41.179842
| 366
| 0.51279
|
e6ddc7e807df3e9ed66f05e57398519f7640921e
| 1,939
|
py
|
Python
|
User.py
|
lilian-2021/passwordLocker
|
d641adc3b3ffc3d720f5db6e0bef1ffa310a71f0
|
[
"Unlicense"
] | null | null | null |
User.py
|
lilian-2021/passwordLocker
|
d641adc3b3ffc3d720f5db6e0bef1ffa310a71f0
|
[
"Unlicense"
] | null | null | null |
User.py
|
lilian-2021/passwordLocker
|
d641adc3b3ffc3d720f5db6e0bef1ffa310a71f0
|
[
"Unlicense"
] | null | null | null |
class User:
"""
Class that generates new instances of User
"""
listOfUser=[]
def __init__(self,first_name,last_name,password,email):
'''
__init__ method that helps us define properties for our objects.
Args:
first_name: New user first name.
last_name : New user last name.
password : New user password.
email : New user email address.
'''
self.first_name = first_name
self.last_name = last_name
self.password = password
self.email = email
def save_User(self):
'''
        method that adds a new user to the listOfUser list
'''
User.listOfUser.append(self)
def delete_User(self):
'''
method deletes a saved user from the user list.
'''
User.listOfUser.remove(self)
@classmethod
def find_by_first_name(cls, first_name):
'''
        method that takes in a first name and returns the user that matches it.
        Args:
            first_name: first name to search for
        Returns:
            the User instance whose first_name matches, or None if there is no match.
'''
for User in cls.listOfUser:
if User.first_name == first_name :
return User
@classmethod
def User_exists(cls, first_name):
'''
method that checks if a user exists from the user list.
Args:
first_name: first_name to search the account exists.
Returns:
Boolean: true or false depending if the user exists.
'''
for User in cls.listOfUser:
if User.first_name == first_name:
return True
return False
@classmethod
def display_Users(cls):
return cls.listOfUser
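# Usage sketch (illustrative only, exercising the API defined above):
#   u = User("Ada", "Lovelace", "s3cret", "ada@example.com")
#   u.save_User()
#   User.User_exists("Ada")         # -> True
#   User.find_by_first_name("Ada")  # -> u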
| 26.561644
| 83
| 0.532749
|
bee3e21a239b85df128a726bf9d09de88e4a9df0
| 1,682
|
py
|
Python
|
kolejka/server/blob/migrations/0001_initial.py
|
dtracz/kolejka
|
ff62bd242b2a3f64f44c3b8e6379e083a67f211d
|
[
"MIT"
] | 11
|
2018-03-16T08:27:17.000Z
|
2021-11-14T23:48:47.000Z
|
kolejka/server/blob/migrations/0001_initial.py
|
dtracz/kolejka
|
ff62bd242b2a3f64f44c3b8e6379e083a67f211d
|
[
"MIT"
] | 1
|
2021-08-18T18:05:37.000Z
|
2021-08-18T18:05:37.000Z
|
kolejka/server/blob/migrations/0001_initial.py
|
dtracz/kolejka
|
ff62bd242b2a3f64f44c3b8e6379e083a67f211d
|
[
"MIT"
] | 3
|
2018-01-10T13:57:19.000Z
|
2021-07-24T09:54:27.000Z
|
# Generated by Django 3.0.5 on 2020-04-22 09:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=64, unique=True)),
('active', models.BooleanField(default=True)),
('size', models.BigIntegerField()),
('time_create', models.DateTimeField(auto_now_add=True)),
('time_access', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Reference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=64, unique=True)),
('time_create', models.DateTimeField(auto_now_add=True)),
('time_access', models.DateTimeField(auto_now=True)),
('permanent', models.BooleanField(default=False)),
('public', models.BooleanField(default=False)),
('blob', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='blob.Blob')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 40.047619
| 118
| 0.601665
|
55f04180479359fe2ac14b3f09cec572cfeeca4d
| 3,179
|
py
|
Python
|
Tools/px4params/xmlout.py
|
dzett/PX4-Firmware
|
9919ba1581c24ddd5d10d6b7530929df843537c8
|
[
"BSD-3-Clause"
] | null | null | null |
Tools/px4params/xmlout.py
|
dzett/PX4-Firmware
|
9919ba1581c24ddd5d10d6b7530929df843537c8
|
[
"BSD-3-Clause"
] | null | null | null |
Tools/px4params/xmlout.py
|
dzett/PX4-Firmware
|
9919ba1581c24ddd5d10d6b7530929df843537c8
|
[
"BSD-3-Clause"
] | null | null | null |
import xml.etree.ElementTree as ET
import codecs
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
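# Note: indent() is the classic ElementTree pretty-printing recipe -- it
# rewrites each element's text/tail in place so that ElementTree.write()
# emits XML indented by two spaces per nesting level.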
class XMLOutput():
def __init__(self, groups, board, inject_xml_file_name):
xml_parameters = ET.Element("parameters")
xml_version = ET.SubElement(xml_parameters, "version")
xml_version.text = "3"
xml_version = ET.SubElement(xml_parameters, "parameter_version_major")
xml_version.text = "1"
xml_version = ET.SubElement(xml_parameters, "parameter_version_minor")
xml_version.text = "8"
importtree = ET.parse(inject_xml_file_name)
injectgroups = importtree.getroot().findall("group")
for igroup in injectgroups:
xml_parameters.append(igroup)
last_param_name = ""
board_specific_param_set = False
for group in groups:
xml_group = ET.SubElement(xml_parameters, "group")
xml_group.attrib["name"] = group.GetName()
for param in group.GetParams():
if (last_param_name == param.GetName() and not board_specific_param_set) or last_param_name != param.GetName():
xml_param = ET.SubElement(xml_group, "parameter")
xml_param.attrib["name"] = param.GetName()
xml_param.attrib["default"] = param.GetDefault()
xml_param.attrib["type"] = param.GetType()
last_param_name = param.GetName()
for code in param.GetFieldCodes():
value = param.GetFieldValue(code)
if code == "board":
if value == board:
board_specific_param_set = True
xml_field = ET.SubElement(xml_param, code)
xml_field.text = value
else:
xml_group.remove(xml_param)
else:
xml_field = ET.SubElement(xml_param, code)
xml_field.text = value
if last_param_name != param.GetName():
board_specific_param_set = False
if len(param.GetEnumCodes()) > 0:
xml_values = ET.SubElement(xml_param, "values")
for code in param.GetEnumCodes():
xml_value = ET.SubElement(xml_values, "value")
                        xml_value.attrib["code"] = code
xml_value.text = param.GetEnumValue(code)
indent(xml_parameters)
self.xml_document = ET.ElementTree(xml_parameters)
def Save(self, filename):
self.xml_document.write(filename, encoding="UTF-8")
| 44.774648
| 127
| 0.540422
|
b9a49fc80c70c0fc66633d334f44594312854068
| 20,608
|
py
|
Python
|
src/generic.py
|
Hailecho/LocusTreeInference
|
cb275e70e4ee4feb333fcac7405da856ca2f6324
|
[
"MIT"
] | null | null | null |
src/generic.py
|
Hailecho/LocusTreeInference
|
cb275e70e4ee4feb333fcac7405da856ca2f6324
|
[
"MIT"
] | null | null | null |
src/generic.py
|
Hailecho/LocusTreeInference
|
cb275e70e4ee4feb333fcac7405da856ca2f6324
|
[
"MIT"
] | null | null | null |
"""
Methods to handle decomposition results regardless of algorithm used
"""
from warnings import warn
import re
def assign_ranks(tree):
"""
    Assigns taxonomic ranks (the height of each node) to a tree.
Modifies the tree in situ.
:param tree: ete2.Tree
:return: None
"""
T = tree
for n in T.traverse(strategy="postorder"):
if n.is_leaf():
n.rank = 0
else:
n.rank = max(c.rank for c in n.children) + 1
class Decomposition(object):
def __init__(self, gene_tree, species_tree, roots):
"""
A locus decomposition of a gene tree with respect to a given species tree.
:param gene_tree: ete2.Tree
A gene tree. Each leaf name needs to correspond exactly to one leaf name of the species tree.
:param species_tree: ete2.Tree
            A ranked species tree. Each node needs to have a 'rank' attribute.
If the supplied species tree is not ranked, it is possible to assign artificial
ranks after initializing the object by running
the method assign_topological_ranks() from this class.
The ranks need to be assigned prior to most operations on the decomposition.
:param roots: list
A list of identifiers (in post-order numbering) of the roots of trees from the decomposition forest.
"""
self.G = gene_tree.copy()
for i, g in enumerate(self.G.traverse(strategy="postorder")):
g.nid = i # used to identify e.g. forest roots
self.G_labelled = False # whether G has been labelled with raw I/P mappings
self.S = species_tree.copy()
self.roots = sorted(roots)
self.F = [] # forest; self.F[i] corresponds to self.roots[i]
self.L = None # locus tree
self.colorized = False # whether the locus tree has been colorized for rendering
self._map_gene_to_species()
# self._assign_locus_ids()
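    # Usage sketch (illustrative only): given an ete2 gene tree G, a species
    # tree S, and root ids produced by some decomposition algorithm, one might do
    #   d = Decomposition(G, S, roots=[3, 7])
    #   d.assign_topological_ranks()  # only if S carries no 'rank' attributes yet
    #   L = d.locus_tree()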
def assign_topological_ranks(self):
"""
Assign ranks to the nodes of the species tree.
        A topological rank equals the height of the node: leaves have rank 0.
:return: None
"""
for n in self.S.traverse(strategy="postorder"):
if n.is_leaf():
n.rank = 0
else:
n.rank = max(c.rank for c in n.children) + 1
def _map_gene_to_species(self):
for l in self.G:
matching_species = []
for s in self.S:
if re.match(s.name, l.name):
matching_species.append(s)
matching_nb = len(matching_species)
if matching_nb == 0:
                raise ValueError('Gene tree leaf %s does not correspond to any species!' % l.name)
            elif matching_nb > 1:
                raise ValueError('Ambiguous species mapping for gene tree leaf %s: e.g. %s and %s'
                                 % (l.name, matching_species[0].name, matching_species[1].name))
else:
l.species = matching_species[0]
## for l in self.G:
## l.species = self.S.get_leaves_by_name(l.name.split('_')[0])
## assert len(l.species) == 1, "Species names are not unique!"
## l.species = l.species[0]
def _assign_locus_ids(self):
"""
Assigns IDs of loci to nodes of the gene tree.
:return:
"""
locus = 0
loci_to_process = [self.L]
while loci_to_process:
root = loci_to_process.pop()
root.locus_id = locus
for n in root.iter_descendants(strategy="postorder",
is_leaf_fn=lambda x: x.source and x != root):
# this will initially assign an improper locus id to a source node,
# but it will be overriden in the next iteration:
n.locus_id = locus
if n.source:
loci_to_process.append(n)
locus += 1
def _compute_adjusted_mappings(self):
"""
Computes the I/P mappings based on neighbouring loci.
        For any node, when computing its mapping values, the tree is temporarily pruned
to contain only the loci of the node's children.
:return: ete2.Tree object
A tree with I and P mapping values added as attributes to each node.
The value of mapping X is stored as node.X attribute.
"""
S = self.S
L = self.L
try:
d = S.rank
except AttributeError:
print("Species tree not initialized; Assign ranks before computing mappings.")
raise
try:
L.locus_id
except AttributeError:
print("Locus tree not initialized; Assign locus IDs before computing mappings.")
raise
# I mapping
pureM = dict((g.nid, S) for g in L.traverse(strategy="postorder")) # I from single loci
combinedM = pureM.copy() # I at junction nodes from neighbouring loci
smap = dict((g, S) for g in L)
for g in L.traverse(strategy="postorder"):
if g.is_leaf():
g_species = g.name.split('_')[0]
g_species = S.get_leaves_by_name(name=g_species)[0]
pureM[g] = g_species
combinedM[g] = g_species
smap[g] = g_species
g.I = 0
g.P = 0
else:
g.P = None # init
# computing pureM
same_locus = [c for c in g.children if c.locus_id == g.locus_id]
same_pureM = [pureM[c] for c in same_locus if pureM[c] is not None]
if not same_locus or not same_pureM:
pureM[g] = None
warn("Detected an extinct lineage (node id %i); This may indicate corrupted data." % g.nid)
print(g.get_ascii(attributes=['name', 'nid']))
elif len(same_pureM) == 1:
pureM[g] = same_pureM[0]
else:
pureM[g] = S.get_common_ancestor(same_pureM)
# computing combinedM and I mapping
all_pureM = [pureM[c] for c in g.children if pureM[c] is not None]
if not all_pureM:
combinedM[g] = None
g.I = None
elif len(all_pureM) == 1:
combinedM[g] = all_pureM[0]
g.I = combinedM[g].rank
else:
combinedM[g] = S.get_common_ancestor(all_pureM)
g.I = combinedM[g].rank
# P mapping
for s in S.traverse():
s.lastvisited = None
leaves = [l for l in L]
for i in range(0, d + 1):
for lid, l1 in enumerate(leaves):
if smap[l1].rank == i:
for l2 in leaves[(lid + 1):]:
if smap[l2] == smap[l1]:
p = l1.get_common_ancestor(l2)
locus_set = [p.locus_id] + [c.locus_id for c in p.children]
if p.P is None and {l1.locus_id, l2.locus_id}.issubset(locus_set):
p.P = i
smap[l1] = smap[l1].up
def _lift_roots(self):
"""
Labels the source nodes, i.e. the ends of cut edges, by adding a boolean 'source' attribute
to each node of the locus tree.
Maximizes the number of junction nodes with adjusted P mapping value = 0.
:return:
"""
# def _score(nid1, nid2):
# """
# Score a path joining two roots from the forest.
# The roots are identified by their postorder IDs: nid1, nid2.
# :return: float
# """
# return 1 if {nid1, nid2} in pairs else 0
def _score(node, nid1, nid2):
"""
Score a path joining two roots from the forest.
The roots are identified by their postorder IDs: nid1, nid2.
:return: float
"""
r1 = L.search_nodes(nid = nid1)[0]
r2 = L.search_nodes(nid = nid2)[0]
sp1 = [l.species for l in r1]
sp2 = [l.species for l in r2]
I1 = sp1[0].get_common_ancestor(sp1).rank
I2 = sp2[0].get_common_ancestor(sp2).rank
I = sp1[0].get_common_ancestor(sp1+sp2).rank
phi = I - (I1 + I2)/2.
phi /= self.S.rank
# _, depth = node.get_farthest_leaf(topology_only=True)
# depth += 1
return (1 if {nid1, nid2} in pairs else 0, 1-phi)
def _psum(*tuples):
return tuple(map(sum, zip(*tuples)))
L = self.L
if not L:
raise ValueError("Locus tree not initialized. Use the locus_tree() method.")
# identifying forests sharing a species
forest = self.forest()
labels = [{l.species for l in f} for f in forest]
pairs = [{forest[i].nid, forest[j].nid} for i in range(len(forest))
for j in range(i + 1, len(forest)) if not labels[i].isdisjoint(labels[j])]
# computing partial costs
for g in L.traverse():
g.cluster_opts = 1 # nb of optimal solutions in cluster
g.source = False
if g.nid in self.roots:
g.inspect = True # whether to compute cost for g
g.roots = [g.nid] # postorder IDs of forest roots visible from g
g.pcosts = [(0., 0.)] # partial costs
g.root = g.nid # index of root joined with g by a path with no cuts
g.optimals = [1] # number of optimal solutions
else:
g.roots = []
g.pcosts = []
g.inspect = False
g.root = -1
g.optimals = []
for g in L.traverse(strategy="postorder"):
if g.is_leaf():
continue
c1, c2 = g.children
if c1.inspect and c2.inspect:
g.inspect = True
g.roots = c1.roots + c2.roots
for i1, r1 in enumerate(c1.roots):
m = max(_psum(c2.pcosts[i2], _score(g, r1, r2)) for i2, r2 in enumerate(c2.roots))
g.pcosts.append(_psum(c1.pcosts[i1], m))
g.optimals.append(c1.optimals[i1] * sum(c2.optimals[i2] for
i2, r2 in enumerate(c2.roots) if
_psum(c2.pcosts[i2], _score(g, r1, r2)) == m))
for i2, r2 in enumerate(c2.roots):
m = max(_psum(c1.pcosts[i1], _score(g, r2, r1)) for i1, r1 in enumerate(c1.roots))
g.pcosts.append(_psum(c2.pcosts[i2], m))
g.optimals.append(c2.optimals[i2] * sum(c1.optimals[i1] for
i1, r1 in enumerate(c1.roots) if
_psum(c1.pcosts[i1], _score(g, r2, r1)) == m))
self.optimal_score = (0, 0)
# backtracking
for g in L.traverse(strategy="preorder"):
if g.inspect and (g.is_root() or not g.up.inspect):
# g is a root of a junction node cluster
g.source = True
if g.is_leaf() and not g.roots == [g.nid]:
raise ValueError("Leaf not a subtree root")
if g.nid in self.roots:
# g is a root of a locus subtree
continue
else:
c1, c2 = g.children
full_cost = (-1, -1)
root1 = -1
root2 = -1
for i1, r1 in enumerate(c1.roots):
for i2, r2 in enumerate(c2.roots):
ncost = _psum(c1.pcosts[i1], c2.pcosts[i2], _score(g, r1, r2))
if ncost > full_cost:
full_cost = ncost
root1 = r1
root2 = r2
assert root1 >= 0 and root2 >= 0
self.optimal_score = _psum(full_cost, self.optimal_score)
g.cluster_opts = sum(c1.optimals[i1] * c2.optimals[i2] for
i1, r1 in enumerate(c1.roots) for i2, r2 in enumerate(c2.roots) if
_psum(c1.pcosts[i1], c2.pcosts[i2], _score(g, r1, r2)) == full_cost)
c2.source = True # arbitrary choice of source below a cluster root
g.root = root1 # joining g with a root by a path with no cuts
c1.root = root1
c2.root = root2
elif g.inspect:
# "internal" node of a junction cluster
if g.is_leaf() and not g.roots == [g.nid]:
raise ValueError("Leaf not a subtree root")
if g.nid in self.roots:
continue
c1, c2 = g.children
if g.root in c1.roots:
c2.source = True
c1.root = g.root
source_child = c2
elif g.root in c2.roots:
c1.source = True
c2.root = g.root
source_child = c1
else:
raise ValueError("Node joined to non-reachable subtree")
max_cost = (-1, -1)
new_root = -1
for i, r in enumerate(source_child.roots):
ncost = _psum(source_child.pcosts[i], _score(g, g.root, r))
if ncost > max_cost:
new_root = r
max_cost = ncost
assert new_root >= 0
source_child.root = new_root
self.number_of_optimal_solutions = 1
for g in L.traverse():
self.number_of_optimal_solutions *= g.cluster_opts
def _assert_sources(self):
internal_nids = [n.nid for f in self.F for n in f.iter_descendants()]
root_nids = [f.nid for f in self.F]
outside_nids = [n.nid for n in self.G.traverse() if n.nid not in internal_nids + root_nids]
source_nids = [n.nid for n in self.L.traverse() if n.source]
junction_nids = [n.nid for n in self.L.traverse() if any(c.source for c in n.children)]
assert junction_nids == outside_nids
def locus_tree(self):
"""
Returns the gene tree with source nodes labelled.
Source nodes represent the locations of evolutionary locus gain events in the gene tree.
Formally, they are defined as the lower nodes of a cut edge.
Each node in the returned tree contains a boolean `source` attribute.
The nodes are also labelled with adjusted I and P mappings (computed based on "neighbouring" locus subtrees).
:return: ete2.Tree object
"""
if self.L:
return self.L
else:
self.L = self.G.copy()
self._lift_roots()
self._assign_locus_ids()
self._compute_adjusted_mappings()
return self.L
def forest(self):
"""
Returns the decomposition as a forest of locus subtrees.
:return: list
A list of ete3.Tree objects.
"""
if self.F:
return self.F
else:
G = self.G.copy()
for r in self.roots:
self.F.append(G.search_nodes(nid=r)[0].detach())
while len(G.children) == 1: # pushing down the root
G = G.children[0]
G.prune(G)
return self.F
def gene_tree(self):
"""
Returns the gene tree, labelled with the raw I/P mapping values (i.e. without considering the locus
structure in the tree).
:return: ete2.Tree
"""
if not self.G_labelled:
compute_mappings(self.G, self.S)
return self.G
def _colorize(self, palette):
"""
Assigns faces and colours to the locus trees for pretty rendering.
:param palette: list
List of strings representing colours in hexadecimal format.
:return:
"""
from ete2 import NodeStyle, faces
if not self.L:
self.locus_tree() # computes & stores the tree
ncol = len(palette)
iFace = faces.AttrFace("I", fsize=8, text_suffix='/')
pFace = faces.AttrFace("P", fsize=8)
# idFace = faces.AttrFace("id", fsize=8)
# suppFace = faces.AttrFace("support", text_suffix=" ", formatter="%.2f", fsize=8)
coloured = dict((i, False) for i, g in enumerate(self.L.traverse(strategy="postorder")))
current_colour = -1
for g in self.L.traverse(strategy="postorder"):
if not g.is_leaf():
# g.add_face(suppFace, position="branch-bottom", column=-2)
g.add_face(iFace, position="branch-top", column=-1)
g.add_face(pFace, position="branch-top", column=0)
if g.source:
current_colour += 1
current_colour %= ncol
style = NodeStyle()
style['vt_line_color'] = palette[current_colour]
style['hz_line_color'] = palette[current_colour]
style['size'] = 0
style['fgcolor'] = '#000000'
style["vt_line_width"] = 2
style["hz_line_width"] = 2
for gg in g.traverse():
if not coloured[gg.nid]:
gg.set_style(style)
coloured[gg.nid] = True
def show(self, tree_style=None, palette=None):
"""
Starts an interactive session to visualize the decomposition.
:return: None
"""
if not palette:
palette = ['#1F77B4', '#AEC7E8', '#FF7F0E',
'#FFBB78', '#2CA02C', '#98DF8A',
'#D62728', '#FF9896', '#9467BD',
'#C5B0D5', '#8C564B', '#C49C94',
'#E377C2', '#F7B6D2', '#7F7F7F',
'#C7C7C7', '#BCBD22', '#DBDB8D',
'#17BECF', '#9EDAE5']
if not self.colorized:
self._colorize(palette)
self.colorized = True
if not tree_style:
from ete2 import TreeStyle
tree_style = TreeStyle()
# tstyle.show_leaf_name = False
tree_style.scale = 28
tree_style.branch_vertical_margin = 6
tree_style.show_branch_length = False
# tstyle.show_branch_support = True
tree_style.show_scale = False
self.L.convert_to_ultrametric()
self.L.show(tree_style=tree_style)
def render(self, fname, layout=None, tree_style=None, palette=None):
"""
Renders the locus tree and writes the image to file.
:param fname: str
Output file path
:param layout:
:param tree_style:
:param palette:
:return: None
"""
if not palette:
palette = ['#1F77B4', '#AEC7E8', '#FF7F0E',
'#FFBB78', '#2CA02C', '#98DF8A',
'#D62728', '#FF9896', '#9467BD',
'#C5B0D5', '#8C564B', '#C49C94',
'#E377C2', '#F7B6D2', '#7F7F7F',
'#C7C7C7', '#BCBD22', '#DBDB8D',
'#17BECF', '#9EDAE5']
if not self.colorized:
self._colorize(palette)
self.colorized = True
if not tree_style:
from ete2 import TreeStyle
tree_style = TreeStyle() # imported during colorizing tree
# tstyle.show_leaf_name = False
tree_style.scale = 28
tree_style.branch_vertical_margin = 6
tree_style.show_branch_length = False
# tstyle.show_branch_support = True
tree_style.show_scale = False
self.L.convert_to_ultrametric()
self.L.render(file_name=fname, tree_style=tree_style)
def write_forest(self, path=None):
"""
Writes the decomposition forest in Newick format.
The forest is returned as a string, optionally written to file.
:param path: str
Path to save the forest.
:return: str
"""
        raise NotImplementedError
| 42.844075
| 148
| 0.516498
|
36045d94a333a1e1fe3ce9f967dc126079a3eb3b
| 58,355
|
py
|
Python
|
plim/lexer.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 85
|
2015-01-08T20:15:54.000Z
|
2022-03-12T21:51:27.000Z
|
plim/lexer.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 18
|
2015-02-27T14:59:08.000Z
|
2021-09-24T10:27:19.000Z
|
plim/lexer.py
|
spollard/Plim
|
7689de85364691063ed5c43a891c433f9ebef5b9
|
[
"MIT"
] | 14
|
2015-02-26T07:20:42.000Z
|
2022-02-01T17:52:16.000Z
|
# -*- coding: utf-8 -*-
"""Plim lexer"""
import functools
import re
import markdown2
from . import errors
from .util import StringIO, MAXSIZE, joined, space_separated, u
from .extensions import rst_to_html
from .extensions import coffee_to_js
from .extensions import scss_to_css
from .extensions import stylus_to_css
# Preface
# ============================================================================================
WHITESPACE = ' '
NEWLINE = '\n'
OPEN_BRACE = '('
CLOSE_BRACE = ')'
CSS_ID_SHORTCUT_DELIMITER = '#'
CSS_CLASS_SHORTCUT_DELIMITER = '.'
# used to separate tag attributes from its inline content and as a prefix of literal blocks
LITERAL_CONTENT_PREFIX = '|'
# Same as above but with the trailing whitespace
LITERAL_CONTENT_SPACE_PREFIX = ','
DYNAMIC_CONTENT_PREFIX = '='
DYNAMIC_CONTENT_SPACE_PREFIX = "=,"
DYNAMIC_ATTRIBUTES_PREFIX = '**'
# used to separate inline tags
INLINE_TAG_SEPARATOR = ':'
# used to separate attribute-value pairs from one another
ATTRIBUTES_DELIMITER = WHITESPACE
# used to separate attribute name from its value
# This is not the same as DYNAMIC_CONTENT_PREFIX
ATTRIBUTE_VALUE_DELIMITER = '='
# port of ruby's boolean methods:
# Ruby's Slim: selected=option_selected?("Slim")
# Python's Plim: selected=option_selected("Plim")?
BOOLEAN_ATTRIBUTE_MARKER = '?'
LINE_BREAK = '\\'
# Please note that in Plim all tag names are intentionally lower-cased
TAG_RULE = '(?P<html_tag>[a-z][a-z0-9-]*)'
TAG_RE = re.compile(TAG_RULE)
LINE_PARTS_RE = re.compile('(?P<indent>\s*)(?P<line>.*)\s*')
MAKO_FILTERS_TAIL_RE = re.compile('\|\s*(?P<filters>[a-zA-Z][_.a-zA-Z0-9]*(?:,\s*[a-zA-Z][_.a-zA-Z0-9]*)*)\s*$')
NUMERIC_VALUE_RE = re.compile(
# Order matters
# Can parse -NUM, +NUM, NUM, .NUM, NUM% and all its combinations
'(?P<value>(?:[-+]?[0-9]*\.[0-9]+|[-+]?[0-9]+%?))'
)
STATEMENT_CONVERT = {
'unless': 'if not (',
'until': 'while not ('
}
INLINE_PYTHON_TERMINATOR = '---'
CSS_ID_SHORTCUT_TERMINATORS = (
CSS_CLASS_SHORTCUT_DELIMITER,
WHITESPACE,
OPEN_BRACE,
INLINE_TAG_SEPARATOR
)
CSS_CLASS_SHORTCUT_TERMINATORS = (
CSS_CLASS_SHORTCUT_DELIMITER,
WHITESPACE,
OPEN_BRACE,
INLINE_TAG_SEPARATOR
)
ATTRIBUTE_TERMINATORS = (
ATTRIBUTE_VALUE_DELIMITER,
ATTRIBUTES_DELIMITER,
INLINE_TAG_SEPARATOR,
LITERAL_CONTENT_PREFIX,
LITERAL_CONTENT_SPACE_PREFIX
)
ATTRIBUTE_TERMINATORS_WITH_PARENTHESES = (
ATTRIBUTE_VALUE_DELIMITER,
ATTRIBUTES_DELIMITER,
CLOSE_BRACE
)
ATTRIBUTE_VALUE_TERMINATORS = (
ATTRIBUTES_DELIMITER,
INLINE_TAG_SEPARATOR,
LITERAL_CONTENT_PREFIX,
LITERAL_CONTENT_SPACE_PREFIX,
DYNAMIC_CONTENT_PREFIX,
BOOLEAN_ATTRIBUTE_MARKER
)
ATTRIBUTE_VALUE_TERMINATORS_WITH_PARENTHESES = (
ATTRIBUTES_DELIMITER,
INLINE_TAG_SEPARATOR,
LITERAL_CONTENT_PREFIX,
LITERAL_CONTENT_SPACE_PREFIX,
DYNAMIC_CONTENT_PREFIX,
BOOLEAN_ATTRIBUTE_MARKER,
CLOSE_BRACE,
NEWLINE
)
STATEMENT_TERMINATORS = {INLINE_TAG_SEPARATOR, NEWLINE}
PYTHON_EXPR_OPEN_BRACES_RE = re.compile('(?P<start_brace>\(|\{|\[).*')
PYTHON_EXPR_CLOSING_BRACES_RE = re.compile('\)|\}|\].*')
MAKO_EXPR_START_BRACE_RE = re.compile('(?P<start_brace>\$\{).*')
MAKO_EXPR_COUNT_OPEN_BRACES_RE = re.compile('\{')
MAKO_EXPR_COUNT_CLOSING_BRACES_RE = re.compile('\}')
QUOTES_RE = re.compile('(?P<quote_type>\'\'\'|"""|\'|").*') # order matters!
EMBEDDING_QUOTE = '`'
EMBEDDING_QUOTE_ESCAPE = EMBEDDING_QUOTE * 2
EMBEDDING_QUOTE_END = '`_'
EMBEDDING_QUOTES_RE = re.compile('(?P<quote_type>{quote_symbol}).*'.format(quote_symbol=EMBEDDING_QUOTE))
# ============================================================================================
# Okay, let's get started.
# There are three different types of functions below: searchers, parsers, and extractors.
# They are grouped together by API and task similarity.
#
# -- SEARCHERS are helper functions that try to figure out the next step of parsing process
# based on the current chunk of data.
# Each searcher MUST accept one required first positional argument *line*.
# ------------------------------
#
# -- PARSERS are the building blocks of Plim. They follow the strict API rules for both
# input and return values.
#
# Every parser MUST accept five input arguments:
# 1) ``indent_level`` - an indentation level of the current line. When the parser reaches a line
# which indentation is lower or equal to ``indent_level``, it returns control to a top-level function.
# 2) ``current_line`` - a line which is being parsed. This is the line that has been matched by
# ``matched`` object at the previous parsing step.
# 3) ``matched`` - an instance of ``re.MatchObject`` of the regex associated with the current parser.
# 4) ``source`` - an instance of an enumerated object returned by :func:`enumerate_source`.
# 5) ``syntax`` - an instance of one of :class:`plim.syntax.BaseSyntax` children.
#
# Every parser MUST return a 4-tuple of:
# 1) parsed_data - a string of successfully parsed data
# 2) tail_indent - an indentation level of the ``tail line``
# 3) tail_line - a line which indentation level (``tail_indent``) is lower or equal to
# the input ``indent_level``.
# 4) ``source`` - an instance of enumerated object returned by :func:`enumerate_source`
# which represents the remaining (untouched) plim markup.
# ------------------------------
#
# -- EXTRACTORS are "light" versions of parsers. Their input arguments
# and return values are task-specific. However, they still have several common features:
# - Each extractor has its own starting and termination sequences.
# - Each extractor tries to find the starting sequence of characters at the beginning
# of the input line. If that attempt fails, the extractor returns None (in most cases).
# If the attempt is successful, the extractor captures all the input characters up to
# the termination sequence.
# - The return value of the succeeded extractor MUST contain not only the extracted value,
# but also an instance of enumerated object returned by :func:`enumerate_source`.
# ------------------------------
#
# P.S. I intentionally did not use "for" statements in conjunction with iterators.
# I wanted to make all parsers free from implicit side-effects.
# Therefore, you can find a number of "while True" and "try/except StopIteration" constructs below.
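# A minimal parser sketch honoring the contract above (illustrative only;
# ``parse_passthrough`` is a hypothetical name, not part of Plim):
#
#     def parse_passthrough(indent_level, current_line, matched, source, syntax):
#         parsed_data = current_line
#         try:
#             lineno, tail_line = next(source)
#         except StopIteration:
#             return parsed_data, 0, '', source
#         tail_indent = len(tail_line) - len(tail_line.lstrip())
#         return parsed_data, tail_indent, tail_line, source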
# Searchers
# ==================================================================================
def search_quotes(line, escape_char='\\', quotes_re=QUOTES_RE):
"""
:param line: may be empty
:type line: str
:param escape_char:
"""
match = quotes_re.match(line)
if not match: return None
find_seq = match.group('quote_type')
find_seq_len = len(find_seq)
pos = find_seq_len
line_len = len(line)
while pos < line_len:
if line[pos] == escape_char:
pos += 2
continue
if line[pos:].startswith(find_seq):
return pos + find_seq_len
pos += 1
return None
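# For example (illustrative expectations):
#   search_quotes('"abc" tail')   -> 5    (slice end just past the closing quote)
#   search_quotes("'''doc''' x")  -> 9
#   search_quotes('no quotes')    -> None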
def search_parser(lineno, line, syntax):
"""Finds a proper parser function for a given line or raises an error
:param lineno:
:param line:
:type syntax: :class:`plim.syntax.BaseSyntax`
"""
for template, parser in syntax.parsers:
matched = template.match(line)
if matched:
return matched, parser
raise errors.ParserNotFound(lineno, line)
# Extractors
# ==================================================================================
def extract_embedding_quotes(content):
"""
``content`` may be empty
:param content:
:param escape_seq:
"""
match = EMBEDDING_QUOTES_RE.match(content)
if not match:
return None
original_string = [EMBEDDING_QUOTE]
embedded_string = []
tail = content[1:]
while tail:
if tail.startswith(EMBEDDING_QUOTE_ESCAPE):
original_string.append(EMBEDDING_QUOTE_ESCAPE)
embedded_string.append(EMBEDDING_QUOTE)
tail = tail[len(EMBEDDING_QUOTE_ESCAPE):]
continue
if tail.startswith(EMBEDDING_QUOTE):
append_seq = EMBEDDING_QUOTE_END if tail.startswith(EMBEDDING_QUOTE_END) else EMBEDDING_QUOTE
original_string.append(append_seq)
original_string = joined(original_string)
content = content[len(original_string):]
embedded_string = joined(embedded_string)
return embedded_string, original_string, content
current_char = tail[0]
original_string.append(current_char)
embedded_string.append(current_char)
tail = tail[1:]
original_string = joined(original_string)
pos = len(original_string)
raise errors.PlimSyntaxError(u('Embedding quote is not closed: "{}"').format(original_string), pos)
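# For example (illustrative): extract_embedding_quotes('`code`_ tail') is
# expected to return ('code', '`code`_', ' tail'); a doubled backtick inside
# the quotes decodes to a single literal backtick in the embedded string.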
def _extract_braces_expression(line, source, starting_braces_re, open_braces_re, closing_braces_re):
"""
:param line: may be empty
:type line: str
:param source:
:type source: str
:param starting_braces_re:
:param open_braces_re:
:param closing_braces_re:
"""
match = starting_braces_re.match(line)
if not match:
return None
open_brace = match.group('start_brace')
buf = [open_brace]
tail = line[len(open_brace):]
braces_counter = 1
while True:
if not tail:
_, tail = next(source)
tail = tail.lstrip()
while tail:
current_char = tail[0]
if closing_braces_re.match(current_char):
braces_counter -= 1
buf.append(current_char)
if braces_counter:
tail = tail[1:]
continue
return joined(buf), tail[1:], source
if current_char == NEWLINE:
_, tail = next(source)
tail = tail.lstrip()
continue
if open_braces_re.match(current_char):
braces_counter += 1
buf.append(current_char)
tail = tail[1:]
continue
result = search_quotes(tail)
if result is not None:
buf.append(tail[:result])
tail = tail[result:]
continue
buf.append(current_char)
tail = tail[1:]
extract_braces = lambda line, source: _extract_braces_expression(line, source,
PYTHON_EXPR_OPEN_BRACES_RE,
PYTHON_EXPR_OPEN_BRACES_RE,
PYTHON_EXPR_CLOSING_BRACES_RE
)
extract_mako_expression = lambda line, source: _extract_braces_expression(line, source,
MAKO_EXPR_START_BRACE_RE,
MAKO_EXPR_COUNT_OPEN_BRACES_RE,
MAKO_EXPR_COUNT_CLOSING_BRACES_RE
)
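# Minimal sketch of extract_braces() (helper added for illustration; it assumes
# the PYTHON_EXPR_*_BRACES_RE patterns treat "(" and ")" as braces):
def _example_extract_braces():
    src = enumerate_source("")  # an empty continuation source suffices for one line
    expr, tail, src = extract_braces("(a, (b + c))!", src)
    assert expr == "(a, (b + c))" and tail == "!"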
def extract_identifier(line, source, identifier_start='#', terminators=('.', ' ', CLOSE_BRACE, INLINE_TAG_SEPARATOR)):
"""
:param line: Current line. It may be empty.
:type line: str or unicode
:param source:
    :type source: enumerate
:param identifier_start:
:param terminators:
:type terminators: tuple or set
"""
if not line or not line.startswith(identifier_start):
return None
pos = len(identifier_start)
buf = [identifier_start]
tail = line[pos:]
while tail:
for terminator in terminators:
if tail.startswith(terminator):
return joined(buf).rstrip(), tail, source
# Let's try to find "mako variable" part of possible css-identifier
result = extract_mako_expression(tail, source)
if result:
expr, tail, source = result
buf.append(expr)
continue
# Check for a string object
result = search_quotes(tail)
if result is not None:
buf.append(tail[:result])
tail = tail[result:]
continue
# Try to search braces of function calls etc
result = extract_braces(tail, source)
if result:
result, tail, source = result
buf.append(result)
continue
current_char = tail[0]
buf.append(current_char)
tail = tail[1:]
return joined(buf).rstrip(), tail, source
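# Sketch of extract_identifier() on a css-id shortcut (illustrative helper, not
# part of the original source); scanning stops at the first terminator character:
def _example_extract_identifier():
    src = enumerate_source("")
    ident, tail, src = extract_identifier('#main.content', src)
    assert ident == '#main' and tail == '.content'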
def extract_digital_attr_value(line):
result = NUMERIC_VALUE_RE.match(line)
if result:
return result.group('value'), line[result.end():]
return None
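# Hedged sketch of extract_digital_attr_value(); it assumes NUMERIC_VALUE_RE
# matches a leading number such as "42" (the exact pattern lives elsewhere):
def _example_extract_digital_attr_value():
    assert extract_digital_attr_value('42 rest') == ('42', ' rest')
    assert extract_digital_attr_value('abc') is None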
def extract_quoted_attr_value(line, search_quotes=search_quotes, remove_escape_seq=True):
"""
:param line:
:param search_quotes:
:param remove_escape_seq: Sometimes escape sequences have to be removed outside of the extractor.
This flag prevents double-escaping of backslash sequences.
:return:
"""
result = search_quotes(line)
if result:
if line.startswith('"""') or line.startswith("'''"):
skip = 3
else:
skip = 1
# remove quotes from value
value = line[skip:result - skip]
# We have to remove backslash escape sequences from the value, but
# at the same time, preserve unicode escape sequences like "\u4e2d\u6587".
if remove_escape_seq:
value = value.encode('raw_unicode_escape')
value = value.decode('unicode_escape')
return value, line[result:]
return None
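# Illustrative sketch (helper not in the original module): surrounding quotes are
# stripped and backslash escape sequences decoded by default.
def _example_extract_quoted_attr_value():
    assert extract_quoted_attr_value('"hello" rest') == ('hello', ' rest')
    assert extract_quoted_attr_value(r'"a\tb"')[0] == 'a\tb'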
def extract_dynamic_attr_value(line, source, terminators, syntax):
result = extract_identifier(line, source, '', terminators)
if result is None:
return None
result, tail, source = result
if MAKO_EXPR_START_BRACE_RE.match(line):
# remove VARIABLE_PLACEHOLDER_START_SEQUENCE and VARIABLE_PLACEHOLDER_END_SEQUENCE from variable
value = result[len(syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE):-len(syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE)]
elif line.startswith(OPEN_BRACE):
# remove "(" and ")" from variable
value = result[1:-1]
else:
value = result
return value, tail, source
def extract_dynamic_tag_attributes(line, source, syntax, inside_parentheses=False):
"""
Extract one occurrence of ``**dynamic_attributes``
:param line:
:param source:
:param inside_parentheses:
"""
if not line.startswith(DYNAMIC_ATTRIBUTES_PREFIX):
return None
line = line[len(DYNAMIC_ATTRIBUTES_PREFIX):]
terminators = {
WHITESPACE,
NEWLINE,
LITERAL_CONTENT_PREFIX,
LITERAL_CONTENT_SPACE_PREFIX,
# we want to terminate extract_identifier() by DYNAMIC_ATTRIBUTES_PREFIX,
# but it contains two characters, whereas the function checks only one character.
# Therefore, we use a single asterisk terminator here instead of DYNAMIC_ATTRIBUTES_PREFIX.
'*',
INLINE_TAG_SEPARATOR,
LINE_BREAK
}
if inside_parentheses:
terminators.add(CLOSE_BRACE)
result = extract_identifier(line, source, '', terminators)
if result is None:
return None
expr, tail, source = result
attributes = u(
'\n%for __plim_key__, __plim_value__ in {expr}.items():\n'
'{var_start}__plim_key__{var_end}="{var_start}__plim_value__{var_end}"\n'
'%endfor\n'
).format(
expr=expr,
var_start=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
var_end=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
)
return attributes, tail, source
def extract_tag_attribute(line, source, syntax, inside_parentheses=False):
"""
:param line:
:param source:
:param inside_parentheses:
:return:
"""
terminators = inside_parentheses and ATTRIBUTE_TERMINATORS_WITH_PARENTHESES or ATTRIBUTE_TERMINATORS
result = extract_identifier(line, source, '', terminators)
if result and result[0]:
result, tail, source = result
attr_name = result
if tail.startswith(ATTRIBUTE_VALUE_DELIMITER):
# value is presented in a form of
# =${dynamic_value} or =dynamic_value or ="value with spaces"
# ------------------------------------------------------------------
# remove ATTRIBUTE_VALUE_DELIMITER
tail = tail[1:]
# 1. Try to parse quoted literal value
# -------------------------------------
result = extract_quoted_attr_value(tail)
if result:
value, tail = result
# remove possible newline character
value = value.rstrip()
return u('{attr_name}="{value}"').format(attr_name=attr_name, value=value), tail, source
# 2. Try to parse digital value
# -------------------------------------
result = extract_digital_attr_value(tail)
if result:
value, tail = result
return u('{attr_name}="{value}"').format(attr_name=attr_name, value=value), tail, source
# 3. Try to parse dynamic value
# -------------------------------------
terminators = inside_parentheses and ATTRIBUTE_VALUE_TERMINATORS_WITH_PARENTHESES or ATTRIBUTE_VALUE_TERMINATORS
result = extract_dynamic_attr_value(tail, source, terminators, syntax)
if result:
value, tail, source = result
# remove possible newline character
value = value.rstrip()
if tail.startswith(BOOLEAN_ATTRIBUTE_MARKER):
# selected=dynamic_variable?
value = u("""{start_var}({value}) and '{attr_name}="{attr_name}"' or ''|n{end_var}""").format(
value=value,
attr_name=attr_name,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
)
attribute = value
tail = tail[1:]
else:
attribute = u('{attr_name}="{start_var}{value}{end_var}"').format(
attr_name=attr_name,
value=value,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
)
return attribute, tail, source
return None
elif inside_parentheses and tail.startswith(ATTRIBUTES_DELIMITER) or tail.startswith(CLOSE_BRACE):
# attribute is presented in a form of boolean attribute
# which should be converted to attr="attr"
return u('{attr_name}="{attr_name}"').format(attr_name=attr_name), tail, source
else:
return None
return None
def extract_line_break(tail, source):
"""
Checks the first character of the tail.
:param tail:
:param source:
:return:
"""
found = False
while True:
if tail.startswith(LINE_BREAK):
found = True
try:
_, tail = next(source)
except StopIteration:
return found, '', source
tail = tail.lstrip()
continue
break
return found, tail, source
def extract_statement_expression(tail, source):
"""
:param tail:
:param source:
:return:
"""
buf = []
# Ensure that tail ends with a newline character
# (required by extract_braces() to properly handle multi-line expressions)
tail = tail.strip() + '\n'
while tail:
# Try to search braces of function calls etc
found, tail, source = extract_line_break(tail, source)
if found:
buf.append(' ')
result = extract_braces(tail, source)
if result:
head, tail, source = result
buf.append(head)
continue
buf.append(tail[0])
tail = tail[1:]
return joined(buf).strip(), source
def extract_tag_line(line, source, syntax):
"""
    Returns a 5-tuple: the inline tags sequence, the closing tags sequence, a dictionary of
    the last tag's components (name, attributes, content), the remaining tail, and the source
:param line:
:type line: str
:param source:
:type source: enumerate
    :param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children
    :type syntax: :class:`plim.syntax.BaseSyntax`
"""
buf = []
close_buf = []
components = {}
tail = line
while tail:
tag_composer = ['<']
# Get tag name
match = TAG_RE.match(tail)
if match:
html_tag = match.group('html_tag').lower()
tail = tail[match.end():]
else:
html_tag = 'div'
tag_composer.append(html_tag)
components['name'] = html_tag
# 1. Parse css id
# --------------------------------------------
result = extract_identifier(tail, source, CSS_ID_SHORTCUT_DELIMITER, CSS_ID_SHORTCUT_TERMINATORS)
if result is None:
css_id = ''
else:
result, tail, source = result
# remove the preceding '#' character
css_id = result[1:].rstrip()
# 2. Parse css class shortcut
# --------------------------------------------
class_identifiers = []
while True:
result = extract_identifier(tail, source, CSS_CLASS_SHORTCUT_DELIMITER, CSS_CLASS_SHORTCUT_TERMINATORS)
if result:
result, tail, source = result
# remove the preceding '.' character
class_identifiers.append(result[1:].rstrip())
continue
break
# 3. Parse tag attributes
# -----------------------------------
_, tail, source = extract_line_break(tail.lstrip(), source)
inside_parentheses = tail.startswith(OPEN_BRACE)
if inside_parentheses:
tail = tail[1:].lstrip()
attributes = []
while True:
_, tail, source = extract_line_break(tail.lstrip(), source)
# 3.1 try to get and unpack dynamic attributes
result = extract_dynamic_tag_attributes(tail, source, syntax, inside_parentheses)
if result:
dynamic_attrs, tail, source = result
attributes.append(dynamic_attrs)
continue
# 3.2. get attribute-value pairs until the end of the section (indicated by terminators)
result = extract_tag_attribute(tail, source, syntax, inside_parentheses)
if result:
attribute_pair, tail, source = result
if attribute_pair.startswith('id="') and css_id:
raise errors.PlimSyntaxError('Your template has two "id" attribute definitions', line)
if attribute_pair.startswith('class="'):
# len('class="') == 7
class_identifiers.append(attribute_pair[7:-1])
continue
attributes.append(attribute_pair)
continue
else:
if inside_parentheses and not tail:
# We have reached the end of the line.
# Try to parse multiline attributes list.
lineno, tail = next(source)
continue
if css_id:
attributes.append(u('id="{ids}"').format(ids=css_id))
if class_identifiers:
class_identifiers = space_separated(class_identifiers)
attributes.append(u('class="{classes}"').format(classes=class_identifiers))
break
attributes = space_separated(attributes)
components['attributes'] = attributes
if attributes:
tag_composer.extend([' ', attributes])
        # 3.3 Syntax check
if inside_parentheses:
if tail.startswith(CLOSE_BRACE):
# We have reached the end of attributes definition
tail = tail[1:].lstrip()
else:
raise errors.PlimSyntaxError("Unexpected end of line", tail)
else:
if tail.startswith(' '):
tail = tail.lstrip()
if html_tag in EMPTY_TAGS:
tag_composer.append('/>')
else:
tag_composer.append('>')
close_buf.append(u('</{tag}>').format(tag=html_tag))
buf.append(joined(tag_composer))
        if tail.startswith(INLINE_TAG_SEPARATOR):
            tail = tail[1:].lstrip()
        else:
            break
    # 3.4 The remainder of the line will be treated as content
# ------------------------------------------------------------------
components['content'] = ''
if tail:
if tail.startswith(DYNAMIC_CONTENT_PREFIX):
tail = tail[1:]
if tail.startswith(DYNAMIC_CONTENT_PREFIX):
# case for the '==' prefix
tail = _inject_n_filter(tail)
if tail.startswith(DYNAMIC_CONTENT_SPACE_PREFIX):
# ensure that a single whitespace is appended
tail, source = extract_statement_expression(tail[2:], source)
buf.append(u("{start_var}{content}{end_var} ").format(
content=tail,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
))
else:
tail, source = extract_statement_expression(tail[1:], source)
buf.append(u("{start_var}{content}{end_var}").format(
content=tail,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
))
else:
if tail.startswith(LITERAL_CONTENT_SPACE_PREFIX):
# ensure that a single whitespace is appended
tail, source = extract_statement_expression(tail[1:], source)
buf.append(u("{start_var}{content}{end_var} ").format(
content=tail,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
))
else:
tail, source = extract_statement_expression(tail, source)
buf.append(u("{start_var}{content}{end_var}").format(
content=tail,
start_var=syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE,
end_var=syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE
))
elif tail.startswith(LITERAL_CONTENT_PREFIX):
tail = _parse_embedded_markup(tail[1:].strip(), syntax)
buf.append(tail)
elif tail.startswith(LITERAL_CONTENT_SPACE_PREFIX):
tail = _parse_embedded_markup(tail[1:].strip(), syntax)
buf.append(u("{content} ").format(content=tail))
else:
tail = _parse_embedded_markup(tail.strip(), syntax)
buf.append(tail)
components['content'] = buf[-1]
tail = ''
return joined(buf), joined(reversed(close_buf)), components, tail, source
# Parsers
# ==================================================================================
def parse_style_script(indent_level, current_line, matched, source, syntax):
"""
:param indent_level:
:param current_line:
:type current_line: str
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
extracted_html_line, close_buf, _, tail, source = extract_tag_line(current_line, source, syntax)
buf = [extracted_html_line, '\n']
parsed_data, tail_indent, tail_line, source = parse_explicit_literal_no_embedded(
indent_level,
LITERAL_CONTENT_PREFIX,
matched,
source,
syntax
)
buf.extend([parsed_data, close_buf])
return joined(buf), tail_indent, tail_line, source
def parse_doctype(indent_level, current_line, ___, source, syntax):
"""
:param indent_level:
:param current_line:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
match = syntax.PARSE_DOCTYPE_RE.match(current_line.strip())
doctype = match.group('type')
return DOCTYPES.get(doctype, DOCTYPES['5']), indent_level, '', source
def parse_handlebars(indent_level, current_line, ___, source, syntax):
"""
:param indent_level:
:param current_line:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
processed_tag, tail_indent, tail_line, source = parse_tag_tree(indent_level, current_line, ___, source, syntax)
assert processed_tag.startswith("<handlebars") and processed_tag.endswith("</handlebars>")
    # Instead of str.replace(), slice off the fixed-length tag names:
    # len("<handlebars") == len("handlebars>") == 11
processed_tag = u(
'<script type="text/x-handlebars"{content}script>'
).format(
content=processed_tag[11:-11]
)
return processed_tag, tail_indent, tail_line, source
def parse_tag_tree(indent_level, current_line, ___, source, syntax):
"""
:param indent_level:
:param current_line:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return: 4-tuple
"""
buf = []
close_buf = []
current_line = current_line.strip()
html_tag, close_seq, _, tail, source = extract_tag_line(current_line, source, syntax)
buf.append(html_tag)
close_buf.append(close_seq)
if tail:
parsed, tail_indent, tail_line, source = parse_plim_tail(0, indent_level, tail, source, syntax)
# at this point we have tail_indent <= indent_level
buf.extend(parsed)
buf.append(joined(close_buf))
return joined(buf), tail_indent, tail_line, source
while True:
try:
lineno, current_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(current_line)
if not tail_line:
continue
if tail_indent <= indent_level:
buf.append(joined(close_buf))
return joined(buf), tail_indent, tail_line, source
# ----------------------------------------------------------
while tail_line:
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
if tail_indent <= indent_level:
buf.append(joined(close_buf))
return joined(buf), tail_indent, tail_line, source
buf.append(joined(close_buf))
return joined(buf), 0, '', source
def parse_markup_languages(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
markup_parser = MARKUP_LANGUAGES[matched.group('lang')]
parsed_data, tail_indent, tail_line, source = parse_explicit_literal_no_embedded(
indent_level,
LITERAL_CONTENT_PREFIX,
matched,
source,
syntax
)
# This is slow but correct.
# Trying to remove redundant indentation
parsed_data = markup_parser(parsed_data)
return parsed_data.strip(), tail_indent, tail_line, source
def parse_python(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
# TODO: merge with parse_mako_text()
if matched.group('python').endswith('!'):
buf = ['<%!\n']
else:
buf = ['<%\n']
inline_statement = matched.group('expr')
if inline_statement:
buf.extend([inline_statement.strip(), '\n'])
parsed_data, tail_indent, tail_line, source = parse_explicit_literal_no_embedded(
indent_level,
LITERAL_CONTENT_PREFIX,
matched,
source,
syntax
)
# do not render a python block if it's empty
if not inline_statement and not parsed_data:
return u(''), tail_indent, tail_line, source
buf.extend([u('{literal}\n').format(literal=parsed_data.rstrip()), '%>\n'])
return joined(buf), tail_indent, tail_line, source
def parse_python_new_style(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
buf = [matched.group('excl') and '-py! ' or '-py ']
inline_statement = matched.group('expr')
if inline_statement:
inline_statement, _tail_line_, source = extract_identifier(inline_statement, source, '', {INLINE_PYTHON_TERMINATOR, NEWLINE})
buf.append(inline_statement)
converted_line = joined(buf).strip()
match = syntax.PARSE_PYTHON_CLASSIC_RE.match(converted_line)
return parse_python(indent_level, __, match, source, syntax)
def parse_mako_text(indent, __, matched, source, syntax):
"""
:param indent:
:param __:
:param matched:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
_, __, components, tail, source = extract_tag_line(matched.group('line').strip(), source, syntax)
buf = ['\n<%text']
if components['attributes']:
buf.extend([' ', components['attributes']])
buf.append('>\n')
if components['content']:
buf.extend([components['content'], '\n'])
parsed_data, tail_indent, tail_line, source = parse_explicit_literal_no_embedded(
indent,
LITERAL_CONTENT_PREFIX,
matched,
source,
syntax
)
if parsed_data:
buf.append(u('{literal}\n').format(literal=parsed_data.rstrip()))
buf.append('</%text>\n')
return joined(buf), tail_indent, tail_line, source
def parse_call(indent_level, current_line, matched, source, syntax):
"""
:param indent_level:
:param current_line:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return: :raise:
"""
_, __, components, tail, source = extract_tag_line(matched.group('line').strip(), source, syntax)
tag = components['content'].strip()
if not tag:
raise errors.PlimSyntaxError("-call must contain namespace:defname declaration", current_line)
buf = [u('\n<%{tag}').format(tag=tag)]
if components['attributes']:
buf.extend([' ', components['attributes']])
buf.append('>\n')
while True:
try:
lineno, tail_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(tail_line)
if not tail_line:
continue
# Parse tree
# --------------------------------------------------------
while tail_line:
if tail_indent <= indent_level:
buf.append(u('</%{tag}>\n').format(tag=tag))
return joined(buf), tail_indent, tail_line, source
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
buf.append(u('</%{tag}>\n').format(tag=tag))
return joined(buf), 0, '', source
def parse_comment(indent_level, __, ___, source, syntax):
"""
:param indent_level:
:param __:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
while True:
try:
lineno, tail_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(tail_line)
if not tail_line:
continue
if tail_indent <= indent_level:
return '', tail_indent, tail_line, source
return '', 0, '', source
def parse_statements(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
stmnt = matched.group('stmnt')
expr = matched.group('expr')
buf = [u('\n{statement_start}{statement}').format(
statement_start=syntax.STATEMENT_START_START_SEQUENCE,
statement=stmnt
)]
if expr:
expr, source = extract_statement_expression(expr, source)
expr, tail_line, source = extract_identifier(expr, source, '', STATEMENT_TERMINATORS)
expr = expr.lstrip()
tail_line = tail_line[1:].lstrip()
parsed, tail_indent, tail_line, source = parse_plim_tail(0, indent_level, tail_line, source, syntax)
buf.append(joined([' ', expr, syntax.STATEMENT_START_END_SEQUENCE, '\n', joined(parsed)]))
else:
# So far only the "-try" statement has empty ``expr`` part
buf.extend([syntax.STATEMENT_START_END_SEQUENCE, '\n'])
try:
lineno, tail_line = next(source)
except StopIteration:
tail_indent = 0
tail_line = ''
else:
tail_indent, tail_line = scan_line(tail_line)
def complete_statement(buf, tail_indent, tail_line, source, statement, syntax):
buf.extend([
'\n',
syntax.STATEMENT_END_START_SEQUENCE,
u('end{statement}').format(statement=statement),
syntax.STATEMENT_END_END_SEQUENCE,
'\n'
])
return joined(buf), tail_indent, tail_line, source
while True:
# Parse tree
# --------------------------------------------------------
while tail_line:
if stmnt == 'if':
if tail_indent == indent_level:
# Check for elif/else
match = syntax.PARSE_ELIF_ELSE_RE.match(tail_line)
if match:
if match.group('control') == 'elif':
expr, source = extract_statement_expression(match.group('expr'), source)
expr, tail_line, source = extract_identifier(expr, source, '', STATEMENT_TERMINATORS)
expr = expr.lstrip()
tail_line = tail_line[1:].lstrip()
parsed, tail_indent, tail_line, source = parse_plim_tail(0, indent_level, tail_line, source, syntax)
buf.append(joined([
'\n',
syntax.STATEMENT_START_START_SEQUENCE,
u('elif {expr}').format(expr=expr),
syntax.STATEMENT_START_END_SEQUENCE,
'\n',
joined(parsed)
]))
if tail_line:
continue
break
else:
# "-else" is found
expr = match.group('expr')
result = extract_identifier(expr, source, '', STATEMENT_TERMINATORS)
if result:
expr, tail_line, source = extract_identifier(expr, source, '', STATEMENT_TERMINATORS)
tail_line = tail_line[1:].lstrip()
parsed, tail_indent, tail_line, source = parse_plim_tail(0, indent_level, tail_line, source, syntax)
buf.append(joined([
'\n',
syntax.STATEMENT_START_START_SEQUENCE,
'else',
syntax.STATEMENT_START_END_SEQUENCE,
'\n',
joined(parsed)
]))
if tail_line:
continue
buf.append(joined([
'\n',
syntax.STATEMENT_START_START_SEQUENCE,
'else',
syntax.STATEMENT_START_END_SEQUENCE,
'\n'
]))
break
else:
# elif/else is not found, finalize and return buffer
return complete_statement(buf, tail_indent, tail_line, source, stmnt, syntax)
elif tail_indent < indent_level:
return complete_statement(buf, tail_indent, tail_line, source, stmnt, syntax)
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
elif stmnt == 'try':
if tail_indent == indent_level:
# Check for except/else/finally
match = syntax.PARSE_EXCEPT_ELSE_FINALLY_RE.match(tail_line)
if match:
if match.group('control') == 'except':
expr, source = extract_statement_expression(match.group('expr'), source)
buf.append(u('\n%except {expr}:\n').format(expr=expr))
break
elif match.group('control') == 'else':
buf.append('\n%else:\n')
break
else:
# "-finally" is found
buf.append('\n%finally:\n')
break
else:
                        # except/else/finally is not found, finalize and return the buffer
return complete_statement(buf, tail_indent, tail_line, source, stmnt, syntax)
elif tail_indent < indent_level:
return complete_statement(buf, tail_indent, tail_line, source, stmnt, syntax)
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
else: # stmnt == for/while
if tail_indent <= indent_level:
return complete_statement(buf, tail_indent, tail_line, source, stmnt, syntax)
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
try:
lineno, tail_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(tail_line)
return complete_statement(buf, 0, '', source, stmnt, syntax)
def parse_foreign_statements(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
stmnt = STATEMENT_CONVERT[matched.group('stmnt')]
buf = [u('-{statement}').format(statement=stmnt)]
expr = matched.group('expr')
expr, source = extract_statement_expression(expr, source)
buf.append(joined([expr, ')']))
matched = syntax.PARSE_STATEMENTS_RE.match(joined(buf))
return parse_statements(indent_level, __, matched, source, syntax)
def parse_explicit_literal(indent_level, current_line, ___, source, syntax, parse_embedded):
"""
Parses lines and blocks started with the "|" (pipe) or "," (comma) character.
:param indent_level:
:param current_line:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:param parse_embedded: whether to parse possible embedded Plim markup
:type parse_embedded: bool
"""
# Get rid of the pipe character
trailing_space_required = current_line[0] == LITERAL_CONTENT_SPACE_PREFIX
# ---------------------------------
def prepare_result(buf):
result = joined(buf).rstrip()
if trailing_space_required:
result = u("{} ").format(result)
if parse_embedded:
result = _parse_embedded_markup(result, syntax)
return result
# --------------------------------
current_line = current_line[1:]
    _, stripped_line = scan_line(current_line)
    # Add line and trailing newline character
    buf = [current_line.strip(), stripped_line and "\n" or ""]
align = MAXSIZE
while True:
try:
lineno, current_line = next(source)
except StopIteration:
break
indent, line = scan_line(current_line)
if not line:
buf.append('\n')
continue
if indent <= indent_level:
result = prepare_result(buf)
return result, indent, line, source
new_align = len(current_line) - len(current_line.lstrip())
if align > new_align:
align = new_align
# remove preceding spaces
line = current_line[align:].rstrip()
buf.extend([line.rstrip(), "\n"])
result = prepare_result(buf)
return result, 0, '', source
parse_explicit_literal_with_embedded_markup = functools.partial(parse_explicit_literal, parse_embedded=True)
parse_explicit_literal_no_embedded = functools.partial(parse_explicit_literal, parse_embedded=False)
def _parse_embedded_markup(content, syntax):
"""
:param content:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
:rtype: str
"""
buf = []
tail = content
while tail:
if tail.startswith(EMBEDDING_QUOTE_ESCAPE):
tail = tail[len(EMBEDDING_QUOTE_ESCAPE):]
buf.append(EMBEDDING_QUOTE)
continue
result = extract_embedding_quotes(tail)
if result:
embedded, original, tail = result
embedded = embedded.strip()
if embedded:
try:
embedded = compile_plim_source(embedded, syntax, False)
except errors.ParserNotFound:
# invalid plim markup, leave things as is
buf.append(original)
else:
buf.append(embedded)
continue
buf.append(tail[0])
tail = tail[1:]
return joined(buf)
def _inject_n_filter(line):
"""
This is a helper function for :func:parse_variable
:param line:
"""
# try to find specified filters
found_filters = MAKO_FILTERS_TAIL_RE.search(line)
if found_filters:
# inject "n" filter to specified filters chain
line = u('{expr}n,{filters}').format(
expr=line[:found_filters.start('filters')].rstrip(),
filters=line[found_filters.start('filters'):]
)
else:
line = u('{expr}|n').format(expr=line)
return line
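# Sketch of _inject_n_filter() (illustrative; assumes MAKO_FILTERS_TAIL_RE captures
# the filter chain that follows a trailing "|"):
def _example_inject_n_filter():
    assert _inject_n_filter('value') == 'value|n'      # no filters: append "|n"
    assert _inject_n_filter('value|h') == 'value|n,h'  # inject "n" into the chain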
def parse_variable(indent_level, __, matched, source, syntax):
""" = variable or == variable
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
explicit_space = matched.group('explicit_space') and ' ' or ''
prevent_escape = matched.group('prevent_escape')
buf = [syntax.VARIABLE_PLACEHOLDER_START_SEQUENCE, matched.group('line')]
while True:
try:
lineno, current_line = next(source)
except StopIteration:
break
indent, line = scan_line(current_line)
if not line:
continue
if indent <= indent_level:
buf = joined(buf)
if prevent_escape:
buf = _inject_n_filter(buf)
# add a closing brace to complete variable expression syntax ("${}" in case of mako).
buf += syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE + explicit_space
return buf, indent, line, source
buf.append(line.strip())
buf = joined(buf)
if prevent_escape:
buf = _inject_n_filter(buf)
buf += syntax.VARIABLE_PLACEHOLDER_END_SEQUENCE + explicit_space
return buf, 0, '', source
def parse_early_return(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
return u('\n<% {keyword} %>\n').format(keyword=matched.group('keyword')), indent_level, '', source
def parse_implicit_literal(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
return parse_explicit_literal_with_embedded_markup(
indent_level,
u('{}{}').format(LITERAL_CONTENT_PREFIX, matched.group('line')),
matched,
source,
syntax
)
def parse_raw_html(indent_level, current_line, ___, source, syntax):
"""
:param indent_level:
:param current_line:
:param ___:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
buf = [current_line.strip(), '\n']
while True:
try:
lineno, tail_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(tail_line)
if not tail_line:
continue
# Parse a tree
# --------------------------------------------------------
while tail_line:
if tail_indent <= indent_level:
return joined(buf), tail_indent, tail_line, source
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
return joined(buf), 0, '', source
def parse_mako_one_liners(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
_, __, components, tail, source = extract_tag_line(matched.group('line').strip(), source, syntax)
buf = [u('<%{tag}').format(tag=components['name'])]
if components['content']:
buf.append(u(' file="{name}"').format(name=components['content']))
if components['attributes']:
buf.extend([' ', components['attributes']])
buf.append('/>')
return joined(buf), indent_level, '', source
def parse_def_block(indent_level, __, matched, source, syntax):
"""
:param indent_level:
:param __:
:param matched:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
_, __, components, tail, source = extract_tag_line(matched.group('line'), source, syntax)
tag = components['name']
buf = [u('<%{def_or_block}').format(def_or_block=tag)]
if components['content']:
buf.append(u(' name="{name}"').format(name=components['content'].strip()))
if components['attributes']:
buf.extend([' ', components['attributes']])
buf.append('>\n')
while True:
try:
lineno, tail_line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(tail_line)
if not tail_line:
continue
# Parse a tree
# --------------------------------------------------------
while tail_line:
if tail_indent <= indent_level:
buf.append(u('</%{def_or_block}>\n').format(def_or_block=tag))
return joined(buf), tail_indent, tail_line, source
# tail_indent > indent_level
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
buf.append(u('</%{def_or_block}>\n').format(def_or_block=tag))
return joined(buf), 0, '', source
def parse_plim_tail(lineno, indent_level, tail_line, source, syntax):
"""
:param lineno:
:param indent_level:
:param tail_line:
:param source:
:param syntax: an instance of one of :class:`plim.syntax.BaseSyntax` children.
:type syntax: :class:`plim.syntax.BaseSyntax`
:return:
"""
buf = []
tail_indent = indent_level
while tail_line:
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(indent_level, tail_line, matched_obj, source, syntax)
buf.append(parsed_data)
if tail_indent <= indent_level:
break
return buf, tail_indent, tail_line, source
# Miscellaneous utilities
# ==================================================================================
def enumerate_source(source):
"""
:param source:
:return:
"""
return enumerate(StringIO(source), start=1)
def scan_line(line):
""" Returns a 2-tuple of (length_of_the_indentation, line_without_preceding_indentation)
:param line:
:type line: str
"""
match = LINE_PARTS_RE.match(line)
return len(match.group('indent')), match.group('line')
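# Illustrative sketch of scan_line() (assumes LINE_PARTS_RE splits leading
# whitespace from the rest of the line):
def _example_scan_line():
    assert scan_line('    div text') == (4, 'div text')
    assert scan_line('plain') == (0, 'plain')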
def compile_plim_source(source, syntax, strip=True):
"""
:param source:
:param syntax: a syntax instance
:type syntax: :class:`plim.syntax.BaseSyntax`
:param strip: for embedded markup we don't want to strip whitespaces from result
:type strip: bool
:return:
"""
# A quick fix for templates with windows-style newlines.
# If you see any issues with it, consider altering lexer's NEWLINE.
source = source.replace('\r\n', '\n')
source = enumerate_source(source)
result = []
while True:
try:
lineno, line = next(source)
except StopIteration:
break
tail_indent, tail_line = scan_line(line)
if not line:
continue
while tail_line:
matched_obj, parse = search_parser(lineno, tail_line, syntax)
parsed_data, tail_indent, tail_line, source = parse(tail_indent, tail_line, matched_obj, source, syntax)
result.append(parsed_data)
result = joined(result)
if strip:
result = result.strip()
return result
# Constants
# ============================================================================================
EMPTY_TAGS = {'meta', 'img', 'link', 'input', 'area', 'base', 'col', 'br', 'hr'}
MARKUP_LANGUAGES = {
'md': markdown2.markdown,
'markdown': markdown2.markdown,
'rst': rst_to_html,
'rest': rst_to_html,
'coffee': coffee_to_js,
'scss': scss_to_css,
'sass': scss_to_css,
'stylus': stylus_to_css
}
DOCTYPES = {
'html':'<!DOCTYPE html>',
'5': '<!DOCTYPE html>',
'1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">',
'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'xml': '<?xml version="1.0" encoding="utf-8" ?>',
'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">',
'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">',
'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">',
}
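# End-to-end sketch (added comment; not from the original file): the public entry
# point is compile_plim_source(source, syntax). Given a Mako-flavoured syntax
# instance (a plim.syntax.BaseSyntax child), something like the following holds:
#     compile_plim_source('p Hello', mako_syntax)  # -> '<p>Hello</p>'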
| 35.778663 | 144 | 0.593694 |
8041a08171342152c767fff59a851e1c8f438a8f | 5,247 | py | Python | build/test_packages.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | ["BSD-3-Clause"] | null | null | null | build/test_packages.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | ["BSD-3-Clause"] | null | null | null | build/test_packages.py | eunchong/infra | ce3728559112bfb3e8b32137eada517aec6d22f9 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests generated CIPD packages.
Supposed to be invoked after build.py has run. Uses packages from out/*.cipd and
tests from tests/*.py.
Assumes cipd client is built in ../go/bin/cipd (true after build.py has run).
"""
import argparse
import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
# Root of infra.git repository.
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# .exe on Windows.
EXE_SUFFIX = '.exe' if sys.platform == 'win32' else ''
class TestException(Exception):
pass
def print_title(title):
"""Pretty prints a banner to stdout."""
sys.stdout.flush()
sys.stderr.flush()
print
print '-' * 80
print title
print '-' * 80
def get_docstring(test_script):
"""Hacky way to grab a first line of a module docstring using regexps."""
with open(test_script, 'rt') as f:
text = f.read()
m = re.match(r'^.*"""(.*?)"""', text, re.DOTALL)
if not m:
return None
return m.group(1).strip().splitlines()[0]
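# For example (illustrative): a test script beginning with the module docstring
# '"""Runs smoke tests.\nDetails follow."""' yields 'Runs smoke tests.' here.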
def run_test(cipd_client, package, work_dir, test_script):
"""Extracts a package to a dir and runs test_script with cwd == work_dir."""
print_title('Deploying %s' % os.path.basename(package))
if not os.access(cipd_client, os.X_OK):
print >> sys.stderr, (
'CIPD client at %s doesn\'t exist or not runnable. Run build.py to '
'build it.' % cipd_client)
return 1
cmd_line = ['cipd', 'pkg-deploy', '-root', work_dir, package]
print ' '.join(cmd_line)
if subprocess.call(args=cmd_line, executable=cipd_client):
raise TestException('Failed to install %s, see logs' % package)
print_title(get_docstring(test_script) or 'Running tests...')
cmd_line = ['python', test_script]
print '%s in %s' % (' '.join(cmd_line), work_dir)
env = os.environ.copy()
env.pop('PYTHONPATH', None)
ret = subprocess.call(
args=cmd_line, executable=sys.executable, env=env, cwd=work_dir)
if ret:
raise TestException('Non zero exit code (%d)' % ret)
def run(
cipd_client,
package_out_dir,
package_tests_dir,
work_dir,
packages):
"""Deployes build *.cipd package locally and runs tests against them.
Used to verify the packaged code works when installed as CIPD package, it is
important for infra_python package that has non-trivial structure.
Args:
    cipd_client: path to cipd client executable.
    package_out_dir: where to search for built packages.
    package_tests_dir: where to search for matching test scripts.
    work_dir: where to install/update packages into.
    packages: names of *.cipd files in package_out_dir or [] for all.
Returns:
    0 on success, 1 on error.
"""
# Discover what to test.
paths = []
if not packages:
paths = glob.glob(os.path.join(package_out_dir, '*.cipd'))
else:
for name in packages:
abs_path = os.path.join(package_out_dir, name)
if not os.path.isfile(abs_path):
raise TestException('No such package file: %s' % name)
paths.append(abs_path)
paths = sorted(paths)
if not paths:
print 'Nothing to test.'
return 0
  # Run all tests sequentially. There are only a couple of them.
nuke_temp = False
if not work_dir:
work_dir = tempfile.mkdtemp(suffix='cipd_test')
nuke_temp = True
work_dir = os.path.abspath(work_dir)
try:
fail = False
for path in paths:
name = os.path.splitext(os.path.basename(path))[0]
test_script = os.path.join(package_tests_dir, '%s.py' % name)
if not os.path.isfile(test_script):
print 'Skipping tests for %s - no such file: %s' % (name, test_script)
continue
try:
run_test(
cipd_client=cipd_client,
package=path,
work_dir=os.path.join(work_dir, name),
test_script=test_script)
print ''
print 'PASS'
except TestException as exc:
print >> sys.stderr, ''
print >> sys.stderr, 'FAILED! ' * 10
print >> sys.stderr, 'Tests for %s failed: %s' % (name, exc)
fail = True
return 1 if fail else 0
finally:
if nuke_temp:
try:
shutil.rmtree(work_dir, ignore_errors=True)
except OSError as exc:
print >> sys.stderr, 'Failed to delete %s: %s' % (work_dir, exc)
def main(
args,
go_workspace=os.path.join(ROOT, 'go'),
package_out_dir=os.path.join(ROOT, 'build', 'out'),
package_tests_dir=os.path.join(ROOT, 'build', 'tests')):
parser = argparse.ArgumentParser(description='Tests infra CIPD packages')
parser.add_argument(
'packages', metavar='NAME', type=str, nargs='*',
help='name of a build package file in build/out/* to deploy and test')
parser.add_argument(
'--work-dir', metavar='DIR', dest='work_dir',
help='directory to deploy packages into (temporary dir by default)')
args = parser.parse_args(args)
return run(
os.path.join(go_workspace, 'bin', 'cipd' + EXE_SUFFIX),
package_out_dir,
package_tests_dir,
args.work_dir,
[n + '.cipd' if not n.endswith('.cipd') else n for n in args.packages])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 30.32948 | 80 | 0.662664 |
38e605892d4cac3ba9d85cecbd98c0b34a69e492 | 6,700 | py | Python | pyscf/scf/__init__.py | zc62/pyscf | 4ed05044d44d49c3ffd4d2e098e9031478fb39e7 | ["Apache-2.0"] | null | null | null | pyscf/scf/__init__.py | zc62/pyscf | 4ed05044d44d49c3ffd4d2e098e9031478fb39e7 | ["Apache-2.0"] | null | null | null | pyscf/scf/__init__.py | zc62/pyscf | 4ed05044d44d49c3ffd4d2e098e9031478fb39e7 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Hartree-Fock
============
Simple usage::
>>> from pyscf import gto, scf
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1')
>>> mf = scf.RHF(mol).run()
:func:`scf.RHF` returns an instance of SCF class. There are some parameters
to control the SCF method.
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
chkfile : str
checkpoint file to save MOs, orbital energies etc.
conv_tol : float
converge threshold. Default is 1e-10
max_cycle : int
max number of iterations. Default is 50
init_guess : str
initial guess method. It can be one of 'minao', 'atom', '1e', 'chkfile'.
Default is 'minao'
DIIS : class listed in :mod:`scf.diis`
Default is :class:`diis.SCF_DIIS`. Set it to None/False to turn off DIIS.
diis : bool
whether to do DIIS. Default is True.
diis_space : int
DIIS space size. By default, 8 Fock matrices and errors vector are stored.
diis_start_cycle : int
The step to start DIIS. Default is 0.
level_shift_factor : float or int
Level shift (in AU) for virtual space. Default is 0.
direct_scf : bool
Direct SCF is used by default.
direct_scf_tol : float
Direct SCF cutoff threshold. Default is 1e-13.
callback : function
callback function takes one dict as the argument which is
generated by the builtin function :func:`locals`, so that the
callback function can access all local variables in the current
        environment.
conv_check : bool
An extra cycle to check convergence after SCF iterations.
nelec : (int,int), for UHF/ROHF class
freeze the number of (alpha,beta) electrons.
irrep_nelec : dict, for symmetry- RHF/ROHF/UHF class only
to indicate the number of electrons for each irreps.
In RHF, give {'ir_name':int, ...} ;
In ROHF/UHF, give {'ir_name':(int,int), ...} .
It is effective when :attr:`Mole.symmetry` is set ``True``.
auxbasis : str, for density fitting SCF only
Auxiliary basis for density fitting.
>>> mf = scf.density_fit(scf.UHF(mol))
>>> mf.scf()
Density fitting can be applied to all non-relativistic HF class.
with_ssss : bool, for Dirac-Hartree-Fock only
If False, ignore small component integrals (SS|SS). Default is True.
with_gaunt : bool, for Dirac-Hartree-Fock only
If False, ignore Gaunt interaction. Default is False.
Saved results
converged : bool
SCF converged or not
e_tot : float
Total HF energy (electronic energy plus nuclear repulsion)
mo_energy :
Orbital energies
mo_occ
Orbital occupancy
mo_coeff
Orbital coefficients
'''
from pyscf.scf import hf
rhf = hf
from pyscf.scf import rohf
from pyscf.scf import hf_symm
rhf_symm = hf_symm
from pyscf.scf import uhf
from pyscf.scf import uhf_symm
from pyscf.scf import ghf
from pyscf.scf import ghf_symm
from pyscf.scf import dhf
from pyscf.scf import chkfile
from pyscf.scf import addons
from pyscf.scf import diis
from pyscf.scf import jk
from pyscf.scf.diis import DIIS, CDIIS, EDIIS, ADIIS
from pyscf.scf.uhf import spin_square
from pyscf.scf.hf import get_init_guess
from pyscf.scf.addons import *
def HF(mol, *args):
if mol.nelectron == 1 or mol.spin == 0:
return RHF(mol, *args)
else:
return UHF(mol, *args)
HF.__doc__ = '''
A wrapper function to create an SCF class (RHF or UHF).\n
''' + hf.SCF.__doc__
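# Dispatch sketch (illustrative, mirroring the wrapper functions in this module):
#
# >>> from pyscf import gto, scf
# >>> mol = gto.M(atom='H 0 0 0; H 0 0 0.74')  # closed shell, spin == 0
# >>> type(scf.HF(mol)).__name__               # doctest: +SKIP
# 'RHF'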
def RHF(mol, *args):
if mol.nelectron == 1:
if mol.symmetry:
return rhf_symm.HF1e(mol)
else:
return rohf.HF1e(mol)
elif not mol.symmetry or mol.groupname == 'C1':
if mol.spin > 0:
return rohf.ROHF(mol, *args)
else:
return rhf.RHF(mol, *args)
else:
if mol.spin > 0:
return rhf_symm.ROHF(mol, *args)
else:
return rhf_symm.RHF(mol, *args)
RHF.__doc__ = hf.RHF.__doc__
def ROHF(mol, *args):
if not mol.symmetry or mol.groupname == 'C1':
return rohf.ROHF(mol, *args)
else:
return hf_symm.ROHF(mol, *args)
ROHF.__doc__ = rohf.ROHF.__doc__
def UHF(mol, *args):
if mol.nelectron == 1:
if not mol.symmetry or mol.groupname == 'C1':
return uhf.HF1e(mol, *args)
else:
return uhf_symm.HF1e(mol, *args)
elif not mol.symmetry or mol.groupname == 'C1':
return uhf.UHF(mol, *args)
else:
return uhf_symm.UHF(mol, *args)
UHF.__doc__ = uhf.UHF.__doc__
def GHF(mol, *args):
if not mol.symmetry or mol.groupname == 'C1':
return ghf.GHF(mol, *args)
else:
return ghf_symm.GHF(mol, *args)
GHF.__doc__ = ghf.GHF.__doc__
def DHF(mol, *args):
if mol.nelectron == 1:
return dhf.HF1e(mol)
elif dhf.zquatev and mol.spin == 0:
return dhf.RDHF(mol, *args)
else:
return dhf.DHF(mol, *args)
DHF.__doc__ = dhf.DHF.__doc__
def X2C(mol, *args):
'''X2C UHF (in testing)'''
from pyscf.x2c import x2c
return x2c.UHF(mol, *args)
def sfx2c1e(mf):
return mf.sfx2c1e()
sfx2c = sfx2c1e
def density_fit(mf, auxbasis=None, with_df=None, only_dfj=False):
return mf.density_fit(auxbasis, with_df, only_dfj)
def newton(mf):
from pyscf.soscf import newton_ah
return newton_ah.newton(mf)
fast_newton = addons.fast_newton
def KS(mol, *args):
from pyscf import dft
return dft.KS(mol, *args)
def RKS(mol, *args):
from pyscf import dft
return dft.RKS(mol, *args)
def ROKS(mol, *args):
from pyscf import dft
return dft.ROKS(mol, *args)
def UKS(mol, *args):
from pyscf import dft
return dft.UKS(mol, *args)
def GKS(mol, *args):
from pyscf import dft
return dft.GKS(mol, *args)
def DKS(mol, *args):
from pyscf import dft
return dft.DKS(mol, *args)
| 29.385965 | 83 | 0.652687 |
c733c04b501fb23898b824654bd498c63815b5aa | 604 | py | Python | class4/exercise8.py | agonzo777/pynet | 8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12 | ["Apache-2.0"] | null | null | null | class4/exercise8.py | agonzo777/pynet | 8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12 | ["Apache-2.0"] | null | null | null | class4/exercise8.py | agonzo777/pynet | 8b2c2bbd71ea001ba0dc2acb20a4d46c7ddeaa12 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import netmiko
from netmiko import ConnectHandler
password = '88newclass'
pynet1 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': password,
}
pynet2 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': password,
'port': 8022,
}
def main():
rlist = [pynet1, pynet2]
for router in rlist:
rtr = ConnectHandler(**router)
rtr.config_mode()
rtr.send_config_from_file(config_file='configfile.txt')
rtr.exit_config_mode()
if __name__ == '__main__':
    main()
| 16.324324 | 63 | 0.607616 |
459d13738f398aa632faa8691524cb2781971f5f | 207 | py | Python | run.py | AhsanAliLodhi/VideoSyncBackend | c5cae2ac0a8d281d05b3c86f376212dda16a21d4 | ["MIT"] | null | null | null | run.py | AhsanAliLodhi/VideoSyncBackend | c5cae2ac0a8d281d05b3c86f376212dda16a21d4 | ["MIT"] | null | null | null | run.py | AhsanAliLodhi/VideoSyncBackend | c5cae2ac0a8d281d05b3c86f376212dda16a21d4 | ["MIT"] | null | null | null |
import os
import config
from app.app import vs_app
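# Note (added for clarity): FLASK_PORT must be set in the environment, because
# int(None) raises a TypeError below; FLASK_DEBUG is passed through as a raw string.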
if __name__ == '__main__':
vs_app.run(
host='0.0.0.0',
port=int(os.getenv('FLASK_PORT')),
debug=os.getenv('FLASK_DEBUG')
)
| 18.818182 | 42 | 0.608696 |
14f102d29f2548d0edd01422215c39e9124d541f | 556 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/handy-for-mechanics-31080 | b9b31e61a6c270257c67e293690d0aacb59929ab | ["FTL", "AML", "RSA-MD"] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/handy-for-mechanics-31080 | b9b31e61a6c270257c67e293690d0aacb59929ab | ["FTL", "AML", "RSA-MD"] | 45 | 2021-10-11T06:11:52.000Z | 2022-03-28T06:14:55.000Z | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/handy-for-mechanics-31080 | b9b31e61a6c270257c67e293690d0aacb59929ab | ["FTL", "AML", "RSA-MD"] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "handy-for-mechanics-31080.botics.co"
site_params = {
"name": "Handy for mechanics",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
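# Note (added): keying update_or_create on id=1 keeps this migration idempotent --
# re-running it updates the default Site row instead of inserting a duplicate.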
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 21.384615 | 61 | 0.661871 |
8f12be9fc8cbe7706feef4e33703db7073c5ec3e | 3,207 | py | Python | Financial Projects/LeakyGANLongTextGeneration/target_lstm.py | Lingesh2311/Python-Projects | 9916cf580dff165a7348f52efd274c743961381d | ["Apache-2.0"] | null | null | null | Financial Projects/LeakyGANLongTextGeneration/target_lstm.py | Lingesh2311/Python-Projects | 9916cf580dff165a7348f52efd274c743961381d | ["Apache-2.0"] | null | null | null | Financial Projects/LeakyGANLongTextGeneration/target_lstm.py | Lingesh2311/Python-Projects | 9916cf580dff165a7348f52efd274c743961381d | ["Apache-2.0"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class TargetLSTM(nn.Module):
""" Target LSTM """
def __init__(self, vocab_size, embedding_dim, hidden_dim, use_cuda):
super(TargetLSTM, self).__init__()
self.hidden_dim = hidden_dim
self.use_cuda = use_cuda
self.embed = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.fc = nn.Linear(hidden_dim, vocab_size)
self.log_softmax = nn.LogSoftmax(dim=1)
self.init_params()
def forward(self, x):
"""
Embeds input and applies LSTM on the input sequence.
Inputs: x
- x: (batch_size, seq_len), sequence of tokens generated by generator
        Outputs: out
            - out: (batch_size * seq_len, vocab_size), lstm output prediction
"""
self.lstm.flatten_parameters()
h0, c0 = self.init_hidden(x.size(0))
emb = self.embed(x) # batch_size * seq_len * emb_dim
        out, _ = self.lstm(emb, (h0, c0)) # out: batch_size * seq_len * hidden_dim (batch_first=True)
        out = self.log_softmax(self.fc(out.contiguous().view(-1, self.hidden_dim))) # (batch_size * seq_len) * vocab_size
return out
def step(self, x, h, c):
"""
Embeds input and applies LSTM one token at a time (seq_len = 1).
Inputs: x, h, c
- x: (batch_size, 1), sequence of tokens generated by generator
- h: (1, batch_size, hidden_dim), lstm hidden state
- c: (1, batch_size, hidden_dim), lstm cell state
Outputs: out, h, c
            - out: (batch_size, vocab_size), lstm output prediction
- h: (1, batch_size, hidden_dim), lstm hidden state
- c: (1, batch_size, hidden_dim), lstm cell state
"""
self.lstm.flatten_parameters()
emb = self.embed(x) # batch_size * 1 * emb_dim
out, (h, c) = self.lstm(emb, (h, c)) # out: batch_size * 1 * hidden_dim
out = self.log_softmax(self.fc(out.contiguous().view(-1, self.hidden_dim))) # batch_size * vocab_size
return out, h, c
def init_hidden(self, batch_size):
h = torch.zeros((1, batch_size, self.hidden_dim))
c = torch.zeros((1, batch_size, self.hidden_dim))
if self.use_cuda:
h, c = h.cuda(), c.cuda()
return h, c
def init_params(self):
for param in self.parameters():
param.data.normal_(0, 1)
def sample(self, batch_size, seq_len):
"""
Samples the network and returns a batch of samples of length seq_len.
Outputs: out
- out: (batch_size * seq_len)
"""
samples = []
h, c = self.init_hidden(batch_size)
x = torch.zeros(batch_size, 1, dtype=torch.int64)
if self.use_cuda:
x = x.cuda()
for _ in range(seq_len):
out, h, c = self.step(x, h, c)
prob = torch.exp(out)
x = torch.multinomial(prob, 1)
samples.append(x)
out = torch.cat(samples, dim=1) # along the batch_size dimension
return out
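if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; the hyperparameters are arbitrary).
    target = TargetLSTM(vocab_size=5000, embedding_dim=32, hidden_dim=32, use_cuda=False)
    samples = target.sample(batch_size=4, seq_len=20)
    print(samples.shape)  # torch.Size([4, 20])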
| 41.649351 | 120 | 0.573745 |
60b6658bf2ce4e35b9f3e7d43f58479c3bd03f44 | 292 | py | Python | Problem_2/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | ["MIT"] | 1 | 2022-03-28T05:32:58.000Z | 2022-03-28T05:32:58.000Z | Problem_2/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | ["MIT"] | null | null | null | Problem_2/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | ["MIT"] | null | null | null |
def fibonacci(maximum):
sequence = [1, 2]
n = 1
while sequence[n] < maximum:
sequence.append(sequence[n] + sequence[n-1])
n += 1
return sequence[:-1]
numbers = fibonacci(4e6)
total = 0
for n in numbers:
if n % 2 == 0:
total += n
print(total)
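# Sanity check (added): the even-valued Fibonacci terms below four million are
# 2, 8, 34, ..., 3524578, and they sum to 4613732.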
| 14.6 | 52 | 0.55137 |
d23dfeb5d558a8c4376a8bdb4e465321fc9c40e7 | 388 | py | Python | reviews/migrations/0005_auto_20191122_2326.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | null | null | null | reviews/migrations/0005_auto_20191122_2326.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | 9 | 2020-01-10T14:10:02.000Z | 2022-03-12T00:08:19.000Z | reviews/migrations/0005_auto_20191122_2326.py | yun-mh/uniwalk | f5307f6970b24736d13b56b4792c580398c35b3a | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.2.5 on 2019-11-22 14:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0004_auto_20191122_2319'),
]
operations = [
migrations.AlterField(
model_name='review',
name='text',
field=models.TextField(verbose_name='本文'),
),
]
| 20.421053 | 54 | 0.595361 |
7bd559ed955c1cd04e6a2d1dbf95247bd8899b3e | 797 | py | Python | tests/test_application.py | z86961027/pycatia | 5dd9b7eb5d21f2261198d6a2af489abafb2f5f32 | ["MIT"] | null | null | null | tests/test_application.py | z86961027/pycatia | 5dd9b7eb5d21f2261198d6a2af489abafb2f5f32 | ["MIT"] | null | null | null | tests/test_application.py | z86961027/pycatia | 5dd9b7eb5d21f2261198d6a2af489abafb2f5f32 | ["MIT"] | null | null | null |
#! /usr/bin/python3.6
from pycatia import CATIAApplication
catia = CATIAApplication()
cat_part = r'tests/CF_catia_measurable_part.CATPart'
def test_application():
assert 'CATIAApplication' in catia.__repr__()
def test_refresh():
documents = catia.documents()
documents.open(cat_part)
document = catia.document()
catia.refresh_display(state=False)
assert catia.refresh_display() is False
catia.refresh_display(state=True)
assert catia.refresh_display() is True
document.close()
def test_visible():
documents = catia.documents()
documents.open(cat_part)
document = catia.document()
catia.visible(state=False)
assert catia.visible() is False
catia.visible(state=True)
assert catia.visible() is True
document.close()
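# --- Added note: running these tests (assumed invocation) ---
# The suite drives a live CATIA session over COM, so CATIA must be running;
# with that in place the tests can be executed with pytest, e.g.:
#   pytest tests/test_application.py -v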
| 19.439024
| 52
| 0.716437
|
5739f7f93670c253bce5dd8e6153b72f8b79cb0a
| 7,090
|
py
|
Python
|
smbscraper.py
|
jpsenior/smbscraper
|
7fe9b7f7cf5419b559a5db8216d9aa8ad63a38e7
|
[
"MIT"
] | 5
|
2015-02-27T01:39:47.000Z
|
2021-09-24T12:53:27.000Z
|
smbscraper.py
|
jpsenior/smbscraper
|
7fe9b7f7cf5419b559a5db8216d9aa8ad63a38e7
|
[
"MIT"
] | 3
|
2015-02-27T01:56:41.000Z
|
2015-02-27T01:57:00.000Z
|
smbscraper.py
|
jpsenior/smbscraper
|
7fe9b7f7cf5419b559a5db8216d9aa8ad63a38e7
|
[
"MIT"
] | 1
|
2017-01-04T16:51:27.000Z
|
2017-01-04T16:51:27.000Z
|
#!/usr/bin/python
# Grab credentials from a second credentials store.
# The MIT License (MIT)
#
# Copyright (c) 2015 JP Senior jp.senior@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import credentials
import re
import tempfile
import socket
import sys
from smb.SMBConnection import SMBConnection
from smb.smb_structs import OperationFailure
from smb.base import NotConnectedError
def sizeof_fmt(num):
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
#TCP port for the SMB/NetBIOS session service; target hosts are taken from the command line below.
port = 139
debug = False
# Meant for many file extensions
searchexts=['ini','bak','cmd','txt','text','conf','cfg','reg','config','lnk','pif']
#These are search words to look for within these files.
searchstrings={
'username':r'(?i)user(name)',
'password':r'(?i)password',
'version':r'(?i)version',
'Credit_Card_Track_1':r'(\D|^)\%?[Bb]\d{13,19}\^[\-\/\.\w\s]{2,26}\^[0-9][0-9][01][0-9][0-9]{3}',
'Credit_Card_Track_2':r'(\D|^)\;\d{13,19}\=(\d{3}|)(\d{4}|\=)',
'Credit_Card_Track_Data':r'[1-9][0-9]{2}\-[0-9]{2}\-[0-9]{4}\^\d',
'Mastercard':r'(\D|^)5[1-5][0-9]{2}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\D|$)',
'Visa':r'(\D|^)4[0-9]{3}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\D|$)',
'AMEX':r'(\D|^)(34|37)[0-9]{2}(\ |\-|)[0-9]{6}(\ |\-|)[0-9]{5}(\D|$)',
'Diners_Club_1':r'(\D|^)30[0-5][0-9](\ |\-|)[0-9]{6}(\ |\-|)[0-9]{4}(\D|$)',
'Diners_Club_2':r'(\D|^)(36|38)[0-9]{2}(\ |\-|)[0-9]{6}(\ |\-|)[0-9]{4}(\D|$)',
'Discover':r'(\D|^)6011(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\D|$)',
'JCB_1':r'(\D|^)3[0-9]{3}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\ |\-|)[0-9]{4}(\D|$)',
'JCB_2':r'(\D|^)(2131|1800)[0-9]{11}(\D|$)',
'Social_Security_Number_dashes':r'(\D|^)[0-9]{3}\-[0-9]{2}\-[0-9]{4}(\D|$)',
'Social_Security_Number_spaces':r'(\D|^)[0-9]{3}\ [0-9]{2}\ [0-9]{4}(\D|$)',
}
searchreg='|'.join(searchexts)
p = re.compile(searchreg, re.IGNORECASE)
#initialize the array; very helpful if you keep re-running the script. This one is a lazy global. JP to fix later.
searchlist={}
def listshares(conn,host):
print ("%15s %9s %5s %4s %s") % ("Name","Temporary","Special","Type","Comments")
for s in conn.listShares():
print ("%15s %9s %5s %4s %s") % (s.name, s.isTemporary, s.isSpecial, s.type, s.comments)
listfiles(s.name, "",conn,host)
#There is no associative array for smb objects, so let's just pretend here that this actually works.
def listfiles(volume,parent,conn,host):
if parent=="": print "Listing files within",volume,"on",host
if debug: print "DEBUG search(" + volume + ',' + parent + ')'
try:
for f in conn.listPath(volume, parent):
if f.filename != '.' and f.filename != '..':
path = "smb://" + host.lower() + "/" + volume + '/' + parent + f.filename
if not f.isDirectory and p.search(path):
searchlist.update({(host,volume,parent + '/' + f.filename):{
'host':host,
'volume':volume,
'alloc_size':f.alloc_size,
'create_time':f.create_time,
'file_attributes':f.file_attributes,
'file_size':f.file_size,
'path':parent,
'filename':f.filename,
'last_access_time':f.last_access_time,
'last_attr_change_time':f.last_attr_change_time,
'last_write_time':f.last_write_time,
'short_name':f.short_name}
})
if debug: print "SCRAPING:", path, "matches file extension regular expression"
if f.isDirectory and f.filename != '.' and f.filename != "..":
if debug: "DEBUG: Calling search(" + volume + "," + parent + " + " + f.filename + "/)"
listfiles(volume, parent + f.filename + '/',conn,host)
except OperationFailure as e:
if debug: print 'Could not open', parent
if debug: print e
#This actually scans files.
def scanfiles(files,conn):
for i in files:
host = i[0]
volume = i[1]
alloc_size = files[i]['alloc_size']
create_time = files[i]['create_time']
file_attributes = files[i]['file_attributes']
file_size = files[i]['alloc_size']
last_access_time = files[i]['last_access_time']
path = files[i]['path']
filename = files[i]['filename']
last_attr_change_time = files[i]['last_attr_change_time']
last_write_time = files[i]['last_write_time']
short_name = files[i]['short_name']
if debug: print 'SCANNING: %20s %10s %5s %s %s %s ' % (last_write_time, sizeof_fmt(file_size), file_attributes, host, volume, path + '/' + filename)
file_obj = tempfile.NamedTemporaryFile()
try:
file_attr, fsize = conn.retrieveFile(volume, path + '/' + filename, file_obj)
if debug: print file_obj
file_obj.seek(0)
x = 0
for line in file_obj:
x = x + 1
if debug: print "DEBUG LISTFILE: %d: %s" % (x, line)
for r in searchstrings:
result=re.search(searchstrings[r], line)
if result:
print 'Violation found in file: %20s %10s %5s %s %s %s ' % (last_write_time, sizeof_fmt(file_size), file_attributes, host, volume, path + '/' + filename)
print 'Found',r,"matching",searchstrings[r],"->", result.group(0)
print '>',line
            file_obj.close()
except OperationFailure as e:
if debug: print 'Could not open', path + '/' + filename
if debug: print e
def scanhost(host,destIP,port):
#Go for it
try:
print "*** Initiating SMB scan for",host,"(" + destIP + ") on port",port
conn = SMBConnection(credentials.username, credentials.password, credentials.clientname, host, domain=credentials.domainname, use_ntlm_v2=True)
assert conn.connect(destIP, port)
listshares(conn,host)
#volume='SYSVOL'
#search(volume,'')
scanfiles(searchlist,conn)
except NotConnectedError:
print "Could not connect to server name", host
i=0
for h in sys.argv:
if not i == 0:
destIP=socket.gethostbyname(h)
print h, destIP
scanhost(h,destIP,port)
i = i + 1
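# --- Added note: expected runtime inputs (a sketch, not part of the tool) ---
# The script imports a sibling `credentials` module; judging from the
# attribute accesses above it needs at least these names (example values
# below are hypothetical):
#
#   # credentials.py
#   username = 'audit-user'
#   password = 'audit-pass'
#   clientname = 'scanner-host'
#   domainname = 'WORKGROUP'
#
# Target hostnames are then passed on the command line:
#   python smbscraper.py fileserver01 fileserver02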
| 39.608939
| 165
| 0.620592
|
6e93c9351fc377ebffae811c42c6bc6084ad2baa
| 11,963
|
py
|
Python
|
build.py
|
NatureGeorge/pyRMSD
|
9b45fcad944596a021823c9d5a5b73fcddbcd1fc
|
[
"MIT"
] | 1
|
2021-02-18T19:15:58.000Z
|
2021-02-18T19:15:58.000Z
|
build.py
|
NatureGeorge/pyRMSD
|
9b45fcad944596a021823c9d5a5b73fcddbcd1fc
|
[
"MIT"
] | null | null | null |
build.py
|
NatureGeorge/pyRMSD
|
9b45fcad944596a021823c9d5a5b73fcddbcd1fc
|
[
"MIT"
] | 2
|
2020-12-11T14:57:48.000Z
|
2021-08-03T12:22:44.000Z
|
from __future__ import print_function
from build_utils import compile_a_file_collection, Link
import optparse
import collections
import os
from build_config import get_config_options_for
if __name__ == '__main__':
parser = optparse.OptionParser(usage='%prog [--build-conf] [--cuda] [--build] [--clean] [--clean-all]', version='4.0.0')
parser.add_option('--build-conf', dest = "conf_file", help="Determines the file storing build configuration info.")
parser.add_option('--cuda', dest = "cuda_type", help="Use this flag if you want to compile the CUDA calculator.")
parser.add_option('--build', dest = "build", action="store_true", help="Use this flag if you want to compile pyRMSD.")
parser.add_option('--clean', dest = "clear", action="store_true", help="Clear all .o generated files.")
parser.add_option('--clean-all', dest = "clear_all", action="store_true", help="The same as --clear, but it also removes generates libs and exes.")
options, args = parser.parse_args()
# Load configuration
conf = get_config_options_for(os.path.join("build_conf","default.conf"), options.conf_file)
######################################
### SET CUDA FLAGS
######################################
CUDA_PRECISION_FLAG = ""
    if options.cuda_type is not None:
        if options.cuda_type == "single":
            CUDA_PRECISION_FLAG = "CUDA_PRECISION_SINGLE"
        elif options.cuda_type == "double":
CUDA_PRECISION_FLAG = "CUDA_PRECISION_DOUBLE"
else:
parser.error("Please, choose precision for CUDA building: 'single' or 'double'")
options.use_cuda = True
conf["CUDA_OPTIONS"] = conf["CUDA_OPTIONS"] +" -D"+CUDA_PRECISION_FLAG
conf["DEFINE_USE_CUDA"] = conf["DEFINE_USE_CUDA"] +" -D"+CUDA_PRECISION_FLAG
else:
options.use_cuda = False
######################################
######################################
### FILE DESCRIPTION
######################################
files_to_compile_with_nvcc = {
"src/calculators/QCP":["QCPCUDAKernel.cu","QCPCUDAMemKernel.cu","kernel_functions_cuda.cu"]
}
files_to_compile_with_gcc = {
"src/calculators":["RMSDCalculator.cpp","RMSDTools.cpp","RMSDCalculationData.cpp","KernelFunctions.cpp"],
"src/calculators/factory":["RMSDCalculatorFactory.cpp"],
"src/calculators/KABSCH":["KABSCHSerialKernel.cpp"],
"src/calculators/QTRFIT":["QTRFITSerialKernel.cpp"],
"src/calculators/QCP":["QCPSerialKernel.cpp","QCPSerialFloatKernel.cpp"],
"src/calculators/NOSUP":["NOSUPSerialKernel.cpp"],
"src/matrix":["Matrix.cpp","Statistics.cpp"],
"src/python":["pyRMSD.cpp"],
"src/pdbreaderlite":["PDBReader.cpp","PDBReaderObject.cpp"],
"src/calculators/test":["main.cpp","test_tools.cpp","tests.cpp"],
}
files_to_compile_with_gcc_and_openmp = {
"src/calculators/KABSCH":["KABSCHOmpKernel.cpp"],
"src/calculators/QTRFIT":["QTRFITOmpKernel.cpp"],
"src/calculators/QCP":["QCPOmpKernel.cpp"],
"src/calculators/NOSUP":["NOSUPOmpKernel.cpp"]
}
######################################
#########################################
###
### BUILDING PROCESS
###
#########################################
if options.build:
files_to_link = collections.defaultdict(str)
if options.use_cuda:
conf["PYTHON_EXTENSION_OPTIONS"] = conf["PYTHON_EXTENSION_OPTIONS"]+" "+conf["DEFINE_USE_CUDA"]
compile_a_file_collection(conf["BASE_DIR"], files_to_compile_with_gcc, "gcc", conf["PYTHON_EXTENSION_OPTIONS"], [conf["PYTHON_INCLUDE_FOLDER"], conf["NUMPY_INCLUDE"]], ".o",files_to_link)
compile_a_file_collection(conf["BASE_DIR"], files_to_compile_with_gcc_and_openmp, "gcc", conf["PYTHON_EXTENSION_OPTIONS"]+" "+conf["OPENMP_OPTION"], [conf["PYTHON_INCLUDE_FOLDER"], conf["NUMPY_INCLUDE"]], ".o",files_to_link)
if options.use_cuda:
compile_a_file_collection(conf["BASE_DIR"], files_to_compile_with_nvcc, "nvcc", conf["CUDA_OPTIONS"], [conf["CUDA_INCLUDE"]], ".o",files_to_link)
linkDSL = Link().\
using("g++").\
with_options([conf["PYTHON_EXTENSION_LINKING_OPTIONS"],conf["OPENMP_OPTION"]]).\
using_libs([conf["PYTHON_LIBRARY"]]).\
using_lib_locations([conf["PYTHON_LIBRARY_FOLDER"]]).\
this_object_files([files_to_link["Matrix"],files_to_link["Statistics"]]).\
to_produce("condensedMatrix.so")
os.system('echo "\033[34m'+ linkDSL.getLinkingCommand()+'\033[0m"')
os.system( linkDSL.getLinkingCommand())
linkDSL = Link().\
using("g++").\
with_options([conf["PYTHON_EXTENSION_LINKING_OPTIONS"],conf["OPENMP_OPTION"]]).\
using_libs([conf["PYTHON_LIBRARY"]]).\
using_lib_locations([conf["PYTHON_LIBRARY_FOLDER"]]).\
this_object_files([files_to_link["PDBReaderObject"],files_to_link["PDBReader"]]).\
to_produce("pdbReader.so")
os.system('echo "\033[34m'+ linkDSL.getLinkingCommand()+'\033[0m"')
os.system( linkDSL.getLinkingCommand())
calculator_obj_files = [
files_to_link["RMSDTools"],
files_to_link["KernelFunctions"],
files_to_link["KABSCHSerialKernel"],
files_to_link["KABSCHOmpKernel"],
files_to_link["QTRFITSerialKernel"],
files_to_link["QTRFITOmpKernel"],
files_to_link["QCPSerialKernel"],
files_to_link["QCPSerialFloatKernel"],
files_to_link["QCPOmpKernel"],
files_to_link["NOSUPSerialKernel"],
files_to_link["NOSUPOmpKernel"],
files_to_link["RMSDCalculatorFactory"],
files_to_link["RMSDCalculationData"],
files_to_link["RMSDCalculator"],
files_to_link["pyRMSD"]
]
if options.use_cuda:
calculator_obj_files.extend([files_to_link["QCPCUDAKernel"],files_to_link["QCPCUDAMemKernel"],files_to_link["kernel_functions_cuda"]])
calculator_libraries = [conf["PYTHON_LIBRARY"],conf["CUDA_LIBRARY"]]
calculator_library_locations = [conf["PYTHON_LIBRARY_FOLDER"], conf["CUDA_LIBRARIES"]]
else:
calculator_libraries = [conf["PYTHON_LIBRARY"]]
calculator_library_locations = [conf["PYTHON_LIBRARY_FOLDER"]]
linkDSL = Link().\
using("g++").\
with_options([conf["PYTHON_EXTENSION_LINKING_OPTIONS"],conf["OPENMP_OPTION"]]).\
using_libs(calculator_libraries).\
using_lib_locations(calculator_library_locations).\
this_object_files(calculator_obj_files).\
to_produce("calculators.so")
os.system('echo "\033[34m'+ linkDSL.getLinkingCommand()+'\033[0m"')
os.system(linkDSL.getLinkingCommand())
test_obj_files = list(calculator_obj_files)
test_obj_files.remove(files_to_link["pyRMSD"])
test_obj_files.extend([files_to_link["main"], files_to_link["test_tools"], files_to_link["tests"]])
linkDSL = Link().\
using("g++").\
with_options([conf["OPENMP_OPTION"]]).\
using_libs(calculator_libraries).\
using_lib_locations(calculator_library_locations).\
this_object_files(test_obj_files).\
to_produce("test_rmsdtools_main")
os.system('echo "\033[34m'+ linkDSL.getLinkingCommand()+'\033[0m"')
os.system(linkDSL.getLinkingCommand())
os.system("mv calculators.so pyRMSD/")
os.system("mv condensedMatrix.so pyRMSD/")
os.system("mv pdbReader.so pyRMSD/")
os.system("mv test_rmsdtools_main src/calculators/test")
##Calculators
if options.use_cuda:
calcs_str = """
def availableCalculators():
return {
"KABSCH_SERIAL_CALCULATOR": 0,
"KABSCH_OMP_CALCULATOR":1,
#"KABSCH_CUDA_CALCULATOR":2,
"QTRFIT_SERIAL_CALCULATOR":3,
"QTRFIT_OMP_CALCULATOR":4,
#"QTRFIT_CUDA_CALCULATOR":5,
"QCP_SERIAL_CALCULATOR":6,
#"QCP_SERIAL_FLOAT_CALCULATOR":7,
"QCP_OMP_CALCULATOR":8,
"QCP_CUDA_CALCULATOR":9,
"QCP_CUDA_MEM_CALCULATOR":10,
"NOSUP_SERIAL_CALCULATOR":11,
"NOSUP_OMP_CALCULATOR":12,
#"NOSUP_CUDA_CALCULATOR":13
}
"""
else:
calcs_str = """
def availableCalculators():
return {
"KABSCH_SERIAL_CALCULATOR": 0,
"KABSCH_OMP_CALCULATOR":1,
#"KABSCH_CUDA_CALCULATOR":2,
"QTRFIT_SERIAL_CALCULATOR":3,
"QTRFIT_OMP_CALCULATOR":4,
#"QTRFIT_CUDA_CALCULATOR":5,
"QCP_SERIAL_CALCULATOR":6,
#"QCP_SERIAL_FLOAT_CALCULATOR":7,
"QCP_OMP_CALCULATOR":8,
#"QCP_CUDA_CALCULATOR":9,
#"QCP_CUDA_MEM_CALCULATOR":10,
"NOSUP_SERIAL_CALCULATOR":11,
"NOSUP_OMP_CALCULATOR":12,
#"NOSUP_CUDA_CALCULATOR":13
}
"""
os.system('echo "\033[33mWriting available calculators...\033[0m"')
open("pyRMSD/availableCalculators.py","w").write(calcs_str)
# Save all produced files
produced_file_handler = open(".products","w")
for produced_file in files_to_link:
if files_to_link[produced_file] != "":
produced_file_handler.write(files_to_link[produced_file] +"\n")
produced_file_handler.close()
######################################
#########################################
###
### REMOVE ALL .o AND TRACKING FILES
###
#########################################
if options.clear or options.clear_all:
os.system('echo "\033[32mCleaning...\033[0m"')
if os.path.exists(".products"):
produced_file = open(".products","r")
for produced_file_line in produced_file:
os.system("rm "+produced_file_line)
print("rm "+produced_file_line[:-1])
produced_file.close()
# Clear the products file itself
os.system("rm .products")
# Remove all trackers
os.system("find src/ -name '.modif*' -exec rm {} \;")
# remove .pyc
os.system("find src/ -name '*.pyc' -exec rm {} \;")
######################################
#########################################
###
### REMOVE ALL LIBS AND EXES
###
#########################################
if options.clear_all:
os.system('echo "\033[32mCleaning exes and libs...\033[0m"')
os.system("rm pyRMSD/calculators.so")
os.system("rm pyRMSD/condensedMatrix.so")
os.system("rm pyRMSD/pdbReader.so")
os.system("rm src/calculators/test/test_rmsdtools_main")
######################################
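    # --- Added usage sketch (inferred from the optparse definitions above) ---
    #   python build.py --build                 # CPU / OpenMP build
    #   python build.py --build --cuda single   # additionally compile the CUDA kernels
    #   python build.py --clean                 # remove .o files and modification trackers
    #   python build.py --clean-all             # also remove the generated libs and executables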
| 48.044177
| 232
| 0.545181
|
53098a6cf5f7c1a3499b6c070bf611f918a2e7d4
| 24,657
|
py
|
Python
|
filebeat/tests/system/test_crawler.py
|
siye1982/packetbeat-finagle-thrift
|
53a87097b09455d55c14e72189cbc22481546e45
|
[
"Apache-2.0"
] | 3
|
2017-08-08T20:08:53.000Z
|
2021-09-16T14:38:00.000Z
|
filebeat/tests/system/test_crawler.py
|
siye1982/packetbeat-finagle-thrift
|
53a87097b09455d55c14e72189cbc22481546e45
|
[
"Apache-2.0"
] | 1
|
2016-07-19T09:33:37.000Z
|
2016-07-19T10:29:54.000Z
|
vendor/github.com/elastic/beats/filebeat/tests/system/test_crawler.py
|
radoondas/logstashbeat
|
d3cbf2b0d6bcb6bd199cd5f0fece879783b2ad62
|
[
"Apache-2.0"
] | 7
|
2017-11-24T23:58:46.000Z
|
2020-03-31T15:57:07.000Z
|
# -*- coding: utf-8 -*-
from filebeat import BaseTest
import codecs
import os
import time
from nose.plugins.skip import Skip, SkipTest
import shutil
# Additional tests to be added:
# * Check what happens when file renamed -> no recrawling should happen
# * Check if file descriptor is "closed" when file disappears
class Test(BaseTest):
def test_fetched_lines(self):
"""
Checks if all lines are read from the log file.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 80
for n in range(0, iterations):
file.write("hello world" + str(n))
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=iterations), max_timeout=10)
# TODO: Find better solution when filebeat did crawl the file
# Idea: Special flag to filebeat so that filebeat is only doing and
# crawl and then finishes
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations == len(output)
def test_unfinished_line_and_continue(self):
"""
        Checks that if a line does not have a line ending, it is not read yet.
        Continuing to write to the file must then pick up the line.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w', 0)
iterations = 80
for n in range(0, iterations):
file.write("hello world" + str(n))
file.write("\n")
# An additional line is written to the log file. This line should not
# be read as there is no finishing \n or \r
file.write("unfinished line")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=80),
max_timeout=15)
# Give it more time to make sure it doesn't read the unfinished line
        # This must be smaller than partial_line_waiting
time.sleep(1)
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations == len(output)
# Complete line so it can be picked up
file.write("\n")
self.wait_until(
lambda: self.output_has(lines=81),
max_timeout=15)
# Add one more line to make sure it keeps reading
file.write("HelloWorld \n")
file.close()
self.wait_until(
lambda: self.output_has(lines=82),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has also the completed lines
assert iterations + 2 == len(output)
def test_partial_line(self):
"""
Checks that partial lines are read as intended
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w', 0)
# An additional line is written to the log file. This line should not
# be read as there is no finishing \n or \r
file.write("complete line\n")
file.write("unfinished line ")
filebeat = self.start_beat()
# Check that unfinished line is read after timeout and sent
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
file.write("extend unfinished line")
time.sleep(1)
# Check that unfinished line is still not read
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
file.write("\n")
# Check that unfinished line is now read
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=15)
file.write("hello world\n")
# Check that new line is read
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_file_renaming(self):
"""
Makes sure that when a file is renamed, the content is not read again.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile1 = self.working_dir + "/log/test-old.log"
file = open(testfile1, 'w')
iterations1 = 5
for n in range(0, iterations1):
file.write("old file")
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
# Rename the file (no new file created)
testfile2 = self.working_dir + "/log/test-new.log"
os.rename(testfile1, testfile2)
file = open(testfile2, 'a')
# using 6 events to have a separate log line that we can
# grep for.
iterations2 = 6
for n in range(0, iterations2):
file.write("new file")
file.write("\n")
file.close()
# expecting 6 more events
self.wait_until(
lambda: self.output_has(lines=iterations1+iterations2), max_timeout=10)
filebeat.check_kill_and_wait()
output = self.read_output()
# Make sure all 11 lines were read
assert len(output) == 11
def test_file_disappear(self):
"""
        Checks that filebeat keeps running in case a log file is deleted
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations1 = 5
for n in range(0, iterations1):
file.write("disappearing file")
file.write("\n")
file.close()
filebeat = self.start_beat()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
os.remove(testfile)
# Create new file to check if new file is picked up
testfile2 = self.working_dir + "/log/test2.log"
file = open(testfile2, 'w')
iterations2 = 6
for n in range(0, iterations2):
file.write("new file")
file.write("\n")
file.close()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure new file was picked up, old file should stay in
assert len(data) == 2
        # Make sure output has 11 entries
output = self.read_output()
assert len(output) == 5 + 6
def test_file_disappear_appear(self):
"""
        Checks that filebeat keeps running in case a log file is deleted.
        On Windows this additionally tests that the file was closed once it could no longer be found.
        If Windows does not close the file, a new one can't be created.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*.log",
close_removed="true",
scan_frequency="0.1s"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
testfilenew = self.working_dir + "/log/hiddenfile"
file = open(testfile, 'w')
# Creates testfile now, to prevent inode reuse
open(testfilenew, 'a').close()
iterations1 = 5
for n in range(0, iterations1):
file.write("disappearing file")
file.write("\n")
file.close()
filebeat = self.start_beat()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1), max_timeout=10)
os.remove(testfile)
# Wait until error shows up on windows
self.wait_until(
lambda: self.log_contains(
"Closing because close_removed is enabled"),
max_timeout=15)
# Move file to old file name
shutil.move(testfilenew, testfile)
file = open(testfile, 'w')
iterations2 = 6
for n in range(0, iterations2):
file.write("new file")
file.write("\n")
file.close()
# Let it read the file
self.wait_until(
lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10)
filebeat.check_kill_and_wait()
data = self.get_registry()
# Make sure new file was picked up. As it has the same file name,
# one entry for the new file and one for the old should exist
assert len(data) == 2
# Make sure output has 11 entries, the new file was started
# from scratch
output = self.read_output()
assert len(output) == 5 + 6
def test_new_line_on_existing_file(self):
"""
Checks that filebeat follows future writes to the same
file.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w') as f:
f.write("hello world\n")
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1), max_timeout=10)
with open(testfile, 'a') as f:
# now write another line
f.write("hello world 1\n")
f.write("hello world 2\n")
self.wait_until(
lambda: self.output_has(lines=1+2), max_timeout=10)
filebeat.check_kill_and_wait()
# Check that output file has the same number of lines as the log file
output = self.read_output()
assert len(output) == 3
def test_multiple_appends(self):
"""
Test that filebeat keeps picking up new lines
after appending multiple times
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# Write initial file
with open(testfile, 'w') as f:
f.write("hello world\n")
f.flush()
self.wait_until(
lambda: self.output_has(1),
max_timeout=15)
lines_written = 0
for n in range(3):
with open(testfile, 'a') as f:
for i in range(0, 20 + n):
f.write("hello world " + str(i) + " " + str(n) + "\n")
lines_written = lines_written + 1
f.flush()
self.wait_until(
lambda: self.output_has(lines_written + 1),
max_timeout=15)
filebeat.check_kill_and_wait()
# Check that output file has the same number of lines as the log file
output = self.read_output()
assert len(output) == (3 * 20 + sum(range(0, 3)) + 1)
def test_new_line_on_open_file(self):
"""
Checks that filebeat follows future writes to the same
file. Same as the test_new_line_on_existing_file but this
time keep the file open and just flush it.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w') as f:
f.write("hello world\n")
f.flush()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# now write another line
f.write("hello world 1\n")
f.write("hello world 2\n")
f.flush()
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=15)
filebeat.check_kill_and_wait()
# Check that output file has the same number of lines as the log file
output = self.read_output()
assert len(output) == 3
def test_tail_files(self):
"""
        Tests that every new file discovered is read starting
        at the end and not at the beginning
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
tailFiles="true"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w') as f:
            # Write lines before the registrar started
f.write("hello world 1\n")
f.write("hello world 2\n")
f.flush()
filebeat = self.start_beat()
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=5)
with open(testfile, 'a') as f:
# write additional lines
f.write("hello world 3\n")
f.write("hello world 4\n")
f.flush()
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=15)
filebeat.check_kill_and_wait()
# Make sure output has only 2 and not 4 lines, means it started at
# the end
output = self.read_output()
assert len(output) == 2
def test_utf8(self):
"""
Tests that UTF-8 chars don't break our log tailing.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
encoding="utf-8"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=15)
# Add utf-8 Chars for the first time
with codecs.open(testfile, "w", "utf-8") as f:
            # Write lines before the registrar started
# Special encoding needed?!?
f.write("ニコラスRuflin".decode("utf-8") + "\n")
f.flush()
self.wait_until(
lambda: self.output_has(lines=1), max_timeout=10)
# Append utf-8 chars to check if it keeps reading
with codecs.open(testfile, "a") as f:
# write additional lines
f.write("Hello\n")
f.write("薩科Ruflin" + "\n")
f.flush()
self.wait_until(
lambda: self.output_has(lines=1+2), max_timeout=10)
filebeat.check_kill_and_wait()
# Make sure output has 3
output = self.read_output()
assert len(output) == 3
def test_encodings(self):
"""
Tests that several common encodings work.
"""
# Sample texts are from http://www.columbia.edu/~kermit/utf8.html
encodings = [
# golang, python, sample text
("plain", "ascii", u"I can eat glass"),
("utf-8", "utf_8",
u"ὕαλον ϕαγεῖν δύναμαι· τοῦτο οὔ με βλάπτει."),
("utf-16be", "utf_16_be",
u"Pot să mănânc sticlă și ea nu mă rănește."),
("utf-16le", "utf_16_le",
u"काचं शक्नोम्यत्तुम् । नोपहिनस्ति माम् ॥"),
("latin1", "latin1",
u"I kå Glas frässa, ond des macht mr nix!"),
("BIG5", "big5", u"我能吞下玻璃而不傷身體。"),
("gb18030", "gb18030", u"我能吞下玻璃而不傷身。體"),
("euc-kr", "euckr", u" 나는 유리를 먹을 수 있어요. 그래도 아프지 않아요"),
("euc-jp", "eucjp", u"私はガラスを食べられます。それは私を傷つけません。")
]
# create a file in each encoding
os.mkdir(self.working_dir + "/log/")
for _, enc_py, text in encodings:
with codecs.open(self.working_dir + "/log/test-{}".format(enc_py),
"w", enc_py) as f:
f.write(text + "\n")
# create the config file
prospectors = []
for enc_go, enc_py, _ in encodings:
prospectors.append({
"path": self.working_dir + "/log/test-{}".format(enc_py),
"encoding": enc_go
})
self.render_config_template(
template="filebeat_prospectors.yml.j2",
prospectors=prospectors
)
# run filebeat
filebeat = self.start_beat()
self.wait_until(lambda: self.output_has(lines=len(encodings)),
max_timeout=15)
# write another line in all files
for _, enc_py, text in encodings:
with codecs.open(self.working_dir + "/log/test-{}".format(enc_py),
"a", enc_py) as f:
f.write(text + " 2" + "\n")
# wait again
self.wait_until(lambda: self.output_has(lines=len(encodings) * 2),
max_timeout=15)
filebeat.check_kill_and_wait()
# check that all outputs are present in the JSONs in UTF-8
# encoding
output = self.read_output()
lines = [o["message"] for o in output]
for _, _, text in encodings:
assert text in lines
assert text + " 2" in lines
def test_include_lines(self):
"""
Checks if only the log lines defined by include_lines are exported
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
include_lines=["^ERR", "^WARN"]
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 20
for n in range(0, iterations):
file.write("DBG: a simple debug message" + str(n))
file.write("\n")
file.write("ERR: a simple error message" + str(n))
file.write("\n")
file.write("WARNING: a simple warning message" + str(n))
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(40),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations * 2 == len(output)
def test_default_include_exclude_lines(self):
"""
Checks if all the log lines are exported by default
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 20
for n in range(0, iterations):
file.write("DBG: a simple debug message" + str(n))
file.write("\n")
file.write("ERR: a simple error message" + str(n))
file.write("\n")
file.write("WARNING: a simple warning message" + str(n))
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(60),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations * 3 == len(output)
def test_exclude_lines(self):
"""
Checks if the lines matching exclude_lines regexp are dropped
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
exclude_lines=["^DBG"]
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 20
for n in range(0, iterations):
file.write("DBG: a simple debug message" + str(n))
file.write("\n")
file.write("ERR: a simple error message" + str(n))
file.write("\n")
file.write("WARNING: a simple warning message" + str(n))
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(40),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations * 2 == len(output)
def test_include_exclude_lines(self):
"""
Checks if all the log lines are exported by default
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
exclude_lines=["^DBG"],
include_lines=["apache"]
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 20
for n in range(0, iterations):
file.write("DBG: a simple debug message" + str(n))
file.write("\n")
file.write("ERR: apache simple error message" + str(n))
file.write("\n")
file.write("ERR: a simple warning message" + str(n))
file.write("\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(20),
max_timeout=15)
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert iterations == len(output)
def test_file_no_permission(self):
"""
Checks that filebeat handles files without reading permission well
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 3
for n in range(0, iterations):
file.write("Hello World" + str(n))
file.write("\n")
file.close()
# Remove reading rights from file
os.chmod(testfile, 0o000)
if os.name == "nt":
raise SkipTest
# TODO: Currently skipping this test on windows as it requires `pip install win32api`
# which seems to have windows only dependencies.
# To solve this problem a requirements_windows.txt could be introduced which would
# then only be used on Windows.
#
# Below is some additional code to give some indication on how the implementation could
# look like.
from win32 import win32api
import win32security
import ntsecuritycon as con
user, domain, type = win32security.LookupAccountName(
"", win32api.GetUserName())
sd = win32security.GetFileSecurity(
testfile, win32security.DACL_SECURITY_INFORMATION)
dacl = win32security.ACL()
# Remove all access rights
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, 0, user)
sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(
testfile, win32security.DACL_SECURITY_INFORMATION, sd)
filebeat = self.start_beat()
self.wait_until(
lambda: self.log_contains("permission denied"),
max_timeout=15)
filebeat.check_kill_and_wait()
os.chmod(testfile, 0o755)
assert False == os.path.isfile(
os.path.join(self.working_dir, "output/filebeat"))
| 30.032887
| 99
| 0.560003
|
9a140c090b6f9c8db1423477d3fb76354eada7f5
| 9,862
|
py
|
Python
|
test/new_tests/test_query_predexp.py
|
mcoberly2/aerospike-client-python
|
d405891f0d6d8b2fc14f78841370bc6a1d302494
|
[
"Apache-2.0"
] | null | null | null |
test/new_tests/test_query_predexp.py
|
mcoberly2/aerospike-client-python
|
d405891f0d6d8b2fc14f78841370bc6a1d302494
|
[
"Apache-2.0"
] | null | null | null |
test/new_tests/test_query_predexp.py
|
mcoberly2/aerospike-client-python
|
d405891f0d6d8b2fc14f78841370bc6a1d302494
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pytest
import sys
from .test_base_class import TestBaseClass
from .as_status_codes import AerospikeStatus
from aerospike import exception as e
from aerospike import predicates as p
from aerospike import predexp as as_predexp
from threading import Lock
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except:
print("Please install aerospike python client.")
sys.exit(1)
class TestQuery(TestBaseClass):
def setup_class(cls):
client = TestBaseClass.get_new_connection()
try:
client.index_integer_create('test', 'demo', 'test_age',
'age_index')
except e.IndexFoundError:
pass
try:
client.index_string_create('test', 'demo', 'addr',
'addr_index')
except e.IndexFoundError:
pass
try:
client.index_integer_create('test', 'demo', 'age1',
'age_index1')
except e.IndexFoundError:
pass
try:
client.index_list_create('test', 'demo', 'numeric_list',
aerospike.INDEX_NUMERIC,
'numeric_list_index')
except e.IndexFoundError:
pass
try:
client.index_list_create('test', 'demo', 'string_list',
aerospike.INDEX_STRING,
'string_list_index')
except e.IndexFoundError:
pass
try:
client.index_map_keys_create('test', 'demo', 'numeric_map',
aerospike.INDEX_NUMERIC,
'numeric_map_index')
except e.IndexFoundError:
pass
try:
client.index_map_keys_create('test', 'demo', 'string_map',
aerospike.INDEX_STRING,
'string_map_index')
except e.IndexFoundError:
pass
try:
client.index_map_values_create('test', 'demo', 'numeric_map',
aerospike.INDEX_NUMERIC,
'numeric_map_values_index')
except e.IndexFoundError:
pass
try:
client.index_map_values_create('test', 'demo', 'string_map',
aerospike.INDEX_STRING,
'string_map_values_index')
except e.IndexFoundError:
pass
try:
client.index_integer_create('test', None, 'test_age_none',
'age_index_none')
except e.IndexFoundError:
pass
try:
client.index_integer_create('test', 'demo',
bytearray("sal\0kj", "utf-8"),
'sal_index')
except e.IndexFoundError:
pass
client.close()
def teardown_class(cls):
client = TestBaseClass.get_new_connection()
policy = {}
try:
client.index_remove('test', 'age_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'age_index1', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'addr_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'numeric_list_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'string_list_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'numeric_map_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'string_map_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'numeric_map_values_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'string_map_values_index', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'age_index_none', policy)
except e.IndexNotFound:
pass
try:
client.index_remove('test', 'sal_index')
except e.IndexNotFound:
pass
client.close()
@pytest.fixture(autouse=True)
def setup_method(self, request, as_connection):
"""
Setup method.
"""
for i in range(5):
key = ('test', 'demo', i)
rec = {
'name': 'name%s' % (str(i)),
'addr': 'name%s' % (str(i)),
'numeric_list': [i, i + 1, i + 2],
'string_list': ["str" + str(i), "str" + str(i + 1),
"str" + str(i + 2)],
'numeric_map': {"a": i,
"b": i + 1,
"c": i + 2},
'string_map': {
"a": "a" + str(i),
"b": "b" + str(i + 1),
"c": "c" + str(i + 2)
},
'test_age_none': 1,
'test_age': i,
'no': i
}
as_connection.put(key, rec)
for i in range(5, 10):
key = ('test', 'demo', i)
rec = {
u'name': 'name%s' % (str(i)),
u'addr': u'name%s' % (str(i)),
u'test_age': i,
u'no': i
}
as_connection.put(key, rec)
key = ('test', 'demo', 122)
llist = [{"op": aerospike.OPERATOR_WRITE,
"bin": bytearray("sal\0kj", "utf-8"),
"val": 80000}]
as_connection.operate(key, llist)
key = ('test', None, 145)
rec = {'test_age_none': 1}
as_connection.put(key, rec)
def teardown():
"""
Teardown method.
"""
for i in range(10):
key = ('test', 'demo', i)
as_connection.remove(key)
key = ('test', 'demo', 122)
as_connection.remove(key)
key = ('test', None, 145)
as_connection.remove(key)
request.addfinalizer(teardown)
def test_query_with_results_method_and_predexp(self):
"""
Invoke query() with correct arguments
"""
from .test_base_class import TestBaseClass
        if TestBaseClass.major_ver >= 5 and TestBaseClass.minor_ver >= 7:
            # print("TestBaseClass.major_ver:", TestBaseClass.major_ver, "TestBaseClass.minor_ver:", TestBaseClass.minor_ver)
            pytest.skip(
                'predexp is deprecated and only applies to server versions earlier than 5.7 (enterprise edition)')
predexp = [
as_predexp.integer_bin('test_age'),
as_predexp.integer_value(1),
as_predexp.integer_equal()
]
policy = {
'predexp': predexp
}
query = self.as_connection.query('test', 'demo')
query.select('name', 'test_age')
records = query.results(policy)
assert len(records) == 1
def test_query_with_results_method_and_invalid_predexp(self):
"""
Invoke query() with correct arguments
"""
predexp = [
as_predexp.integer_bin('test_age'),
as_predexp.integer_value('1'),
as_predexp.integer_equal()
]
policy = {
'predexp': predexp
}
query = self.as_connection.query('test', 'demo')
query.select('name', 'test_age')
with pytest.raises(e.ParamError):
query.results(policy)
def test_query_with_correct_parameters_predexp(self):
"""
Invoke query() with correct arguments and using predexp
"""
from .test_base_class import TestBaseClass
        if TestBaseClass.major_ver >= 5 and TestBaseClass.minor_ver >= 7:
            # print("TestBaseClass.major_ver:", TestBaseClass.major_ver, "TestBaseClass.minor_ver:", TestBaseClass.minor_ver)
            pytest.skip(
                'predexp is deprecated and only applies to server versions earlier than 5.7 (enterprise edition)')
predexp = [
as_predexp.integer_bin('test_age'),
as_predexp.integer_value(4),
as_predexp.integer_equal(),
]
query = self.as_connection.query('test', 'demo')
query.select('name', 'test_age')
#query.where(predicate)
records = []
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback, {'predexp': predexp})
assert len(records) == 1
assert records[0]['test_age'] == 4
@pytest.mark.parametrize(
"func",
[
as_predexp.integer_value,
as_predexp.predexp_and,
as_predexp.predexp_or,
as_predexp.rec_digest_modulo,
])
def test_with_wrong_predicate_argument_type_expecting_int(self, func):
'''
These functions all expect an integer argument, call with a string
'''
predexps = [
func("five")
]
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query = self.as_connection.query('test', 'demo')
with pytest.raises(e.ParamError):
query.foreach(callback, {'predexp': predexps})
| 30.722741
| 125
| 0.502231
|
3b93df794c0218674bff1af2cfca7742e39ed940
| 1,056
|
py
|
Python
|
pyxel/ui/text_button.py
|
roliveira/pyxel
|
7725639172cf29861b6e924a5b0103d66277086f
|
[
"MIT"
] | 1
|
2019-10-18T01:54:10.000Z
|
2019-10-18T01:54:10.000Z
|
pyxel/ui/text_button.py
|
Shiozaki-s21/pyxel
|
8938d656fccf1c87d56fa4187e9d6b242b132c75
|
[
"MIT"
] | null | null | null |
pyxel/ui/text_button.py
|
Shiozaki-s21/pyxel
|
8938d656fccf1c87d56fa4187e9d6b242b132c75
|
[
"MIT"
] | null | null | null |
import pyxel
from pyxel.constants import FONT_HEIGHT, FONT_WIDTH
from .button import Button
from .constants import (
BUTTON_DISABLED_COLOR,
BUTTON_ENABLED_COLOR,
BUTTON_PRESSED_COLOR,
BUTTON_TEXT_COLOR,
)
class TextButton(Button):
"""
Events:
__on_press()
__on_release()
"""
def __init__(self, parent, x, y, text, **kwargs):
super().__init__(
parent, x, y, len(text) * FONT_WIDTH + 3, FONT_HEIGHT + 1, **kwargs
)
self._text = text
self.add_event_handler("draw", self.__on_draw)
def __on_draw(self):
x1 = self.x
y1 = self.y
x2 = self.x + self.width - 1
y2 = self.y + self.height - 1
col = (
BUTTON_PRESSED_COLOR
if self.is_pressed
else (BUTTON_ENABLED_COLOR if self.is_enabled else BUTTON_DISABLED_COLOR)
)
pyxel.rect(x1 + 1, y1, x2 - 1, y2, col)
pyxel.rect(x1, y1 + 1, x2, y2 - 1, col)
pyxel.text(x1 + 2, y1 + 1, self._text, BUTTON_TEXT_COLOR)
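# --- Added usage sketch ---
# The button sizes itself from its label via FONT_WIDTH/FONT_HEIGHT and draws
# a rounded rectangle whose colour tracks the pressed/enabled state. The
# Button/Widget base classes are outside this excerpt, so the parent widget
# below is hypothetical:
#
#   button = TextButton(parent_widget, 10, 10, "RUN")
#   button.add_event_handler("press", on_run_pressed)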
| 24.55814
| 85
| 0.586174
|
f13805aab996de9a24d316b9b92683a7fd905bdc
| 5,690
|
py
|
Python
|
PREPROCESSING/gaussian_multivariate.py
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | 4
|
2021-08-20T18:21:09.000Z
|
2022-01-12T09:30:29.000Z
|
PREPROCESSING/gaussian_multivariate.py
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | null | null | null |
PREPROCESSING/gaussian_multivariate.py
|
Vicomtech/STDG-evaluation-metrics
|
4662c2cc60f7941723a876a6032b411e40f5ec62
|
[
"MIT"
] | null | null | null |
#Load libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
class DataPreProcessor :
"""
A class used to preprocess train data and transform generated data from GM approach
...
Attributes
----------
int_features : list
a list with the integer columns names of the dataframe
float_features : list
a list with the float columns names of the dataframe
categorical_columns : list
a list with the categorical columns names of the dataframe
train_data : pandas.core.frame.DataFrame
a dataframe that contains the data to be preprocessed for training GM approach
categorical_encoders : dict
a dictionary that contains one label encoder per each categorical attribute
one_hot_encoders : dict
a dictionary that contains one one-hot encoder per each categorical attribute
encoded_vars : dict
a dictionary that contains the value of the encoded categorical attributes
Methods
----------
preprocess_train_data()
Preprocess the train data
transform_data(generated_samples)
        Transforms the generated samples according to the encoders trained on the train data to obtain synthetic data
"""
def __init__(self, data) :
"""
Parameters
----------
data : pandas.core.frame.DataFrame
data to be preprocessed
"""
#initialize the list of int, float and categorical attributes
self.int_features = data.select_dtypes(include=['int64']).columns.tolist()
self.float_features = data.select_dtypes(include=['float64']).columns.tolist()
self.categorical_columns = data.select_dtypes(include=['category']).columns.tolist()
self.train_data = data
#initialize the dictionaries that will contain the encoders for each categorical attribute
self.categorical_encoders = dict()
self.one_hot_encoders = dict()
self.encoded_vars = dict()
def preprocess_train_data(self) :
"""Preprocess the train data
Returns
-------
pandas.core.frame.DataFrame
a dataframe with the preprocessed data
"""
#copy the train data
data = self.train_data.copy()
#iterate over all categorical columns
for column in self.categorical_columns :
#fit one-hot encoder for the attribute and transform it
self.one_hot_encoders[column] = OneHotEncoder().fit(np.asarray(data[column].astype('category')).reshape(-1,1))
self.encoded_vars[column] = self.one_hot_encoders[column].transform(np.asarray(data[column].astype('category')).reshape(-1,1)).toarray()
data = data.drop([column], axis=1)
            #apply the sigmoid (logistic) function to each one-hot encoded column
for i in range(0,self.encoded_vars[column].shape[1]) :
data[column + str(i)] = self.encoded_vars[column][:,i]
data[column + str(i)] = data[column + str(i)].astype('int8')
data[column + str(i)] = np.exp(np.asarray(data[column + str(i)].values)) / (1 + np.exp(np.asarray(data[column + str(i)].values)))
#return the preprocessed data
return data
def transform_data(self, generated_samples) :
"""Tranforms the generated samples according to the encoders trained with train data to obtain synthetic data
Parameters
----------
generated_samples : pandas.core.frame.DataFrame
a dataframe with the generated data to be transformed
Returns
-------
pandas.core.frame.DataFrame
a dataframe with the transformed data
"""
#convert the integer attributes of the dataframe
for c in self.int_features :
generated_samples[c] = generated_samples[c].astype('int64')
synthetic_data = generated_samples.select_dtypes(include=['int64'])
#convert the float attributes of the dataframe
for c in self.float_features :
generated_samples[c] = generated_samples[c].astype('float64')
synthetic_data = generated_samples.select_dtypes(include=['float64'])
#transform categorical features to original features types
for col in self.categorical_columns :
#get the obtained numerical values of each categorical attribute encoded group
cols_drop = (generated_samples.filter(regex=col)).columns.tolist()
values = generated_samples.filter(regex=col).values
generated_samples = generated_samples.drop(cols_drop, axis = 1)
            #iterate over all rows, assigning 1 to the maximum value in each row and 0 to the rest
for i in range(0,values.shape[0]) :
m = max(values[i,:])
for j, k in enumerate(values[i,:]) :
if k == m :
values[i,j] = 1
else :
values[i,j] = 0
#perform the inverse one-hot encoding of the categorical attribute
generated_samples[col] = self.one_hot_encoders[col].inverse_transform(values)
#sort the attributes of the dataframe to be in the same order as in real train data
synthetic_data = pd.DataFrame(columns = self.train_data.columns)
for col in self.train_data.columns :
synthetic_data[col] = generated_samples[col]
#return the transformed synthetic dataframe
return synthetic_data
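# --- Added usage sketch ---
# Typical round trip; the GM sampler itself is outside this excerpt, so the
# middle step is only indicated:
#
#   pre = DataPreProcessor(train_df)          # train_df: pandas DataFrame
#   encoded = pre.preprocess_train_data()     # numeric frame for the GM model
#   ...fit a Gaussian multivariate model on `encoded` and sample `generated`...
#   synthetic = pre.transform_data(generated) # back to the original schema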
| 39.79021
| 148
| 0.648506
|
c9472a5e5f1e300a3d492658b1898aa1a0d23b71
| 36,455
|
py
|
Python
|
moi/views.py
|
UmakantKulkarni/free5gmano
|
1772bf64f5882435090f0419307ba2e3fa730794
|
[
"Apache-2.0"
] | null | null | null |
moi/views.py
|
UmakantKulkarni/free5gmano
|
1772bf64f5882435090f0419307ba2e3fa730794
|
[
"Apache-2.0"
] | null | null | null |
moi/views.py
|
UmakantKulkarni/free5gmano
|
1772bf64f5882435090f0419307ba2e3fa730794
|
[
"Apache-2.0"
] | null | null | null |
import ast
import threading
import datetime
import base64
import requests
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import action
from rest_framework.parsers import JSONParser
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from django.apps import apps
from moi.models import *
from moi.serializers import *
from moi.enums import Modification, Scope, OperationStatus
from free5gmano.settings import THREAD_POOL
from nssmf.models import SliceTemplate
from nssmf.serializers import SliceTemplateRelationSerializer
from rest_framework.response import Response
class TaskThread(threading.Thread):
def __init__(self, time_tick, callback_uri, notify_data):
super().__init__()
self.time_tick = time_tick
self.callback_uri = callback_uri
self.notify_data = notify_data
self.stop = False
def run(self):
import time
# Get MOI values list
model = apps.get_model('moi', self.notify_data['objectClass'])
pre_value_list = list()
object_queryset = model.objects.values()
for query in object_queryset:
if str(query[model._meta.pk.name]) in self.notify_data['objectInstanceInfos']:
pre_value_list.append(query)
if self.notify_data['notificationType'] == 'notifyMOIDeletion':
while 1:
time.sleep(self.time_tick)
object_queryset = model.objects.values()
condition = list()
for query in object_queryset:
condition.append(str(query[model._meta.pk.name]) in
self.notify_data['objectInstanceInfos'])
if not any(condition):
# Notify calluri
print('Delete Notify')
header = {"Content-Type": "application/vnd.kafka.v1+json"}
data = {
"notificationId": self.notify_data['notificationId'],
"notificationType": self.notify_data['notificationType']
}
b64_data = base64.b64encode(str(data).encode())
_data = {"records": [{"value": b64_data.decode()}]}
requests.post(url=self.callback_uri, json=_data, headers=header)
break
elif self.notify_data['notificationType'] == 'notifyMOIAttributeValueChanges':
while not self.stop:
time.sleep(self.time_tick)
value_list = list()
object_queryset = model.objects.values()
for query in object_queryset:
if str(query[model._meta.pk.name]) in self.notify_data['objectInstanceInfos']:
value_list.append(query)
for i in range(len(value_list)):
if value_list[i] == pre_value_list[i]:
pass
else:
print('Change MOI')
# Notify calluri
header = {"Content-Type": "application/vnd.kafka.v1+json"}
data = {
"notificationId": self.notify_data['notificationId'],
"notificationType": self.notify_data['notificationType']
}
b64_data = base64.b64encode(str(data).encode())
_data = {"records": [{"value": b64_data.decode()}]}
requests.post(url=self.callback_uri, json=_data, headers=header)
pre_value_list = value_list
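# --- Added usage sketch ---
# TaskThread polls the watched MOI table every `time_tick` seconds and POSTs a
# base64-encoded notification to the callback URI (a Kafka REST endpoint,
# judging from the content type above). Example values below are hypothetical:
#
#   notify_data = {'objectClass': 'NetworkSliceSubnet',
#                  'objectInstanceInfos': ['<instance-pk>'],
#                  'notificationId': 1,
#                  'notificationType': 'notifyMOIDeletion'}
#   TaskThread(5, 'http://kafka-rest:8082/topics/moi', notify_data).start()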
class MultipleSerializerViewSet(ModelViewSet):
def get_serializer_class(self):
if self.action in ('list', 'retrieve'):
return SubscriptionRetrieveSerializer
return SubscriptionSerializer
class ObjectManagement(ModelViewSet):
queryset = NetworkSliceSubnet.objects.all()
serializer_class = NetworkSliceSubnetSerializer
@csrf_exempt
    @action(detail=True, methods=['PUT'], name='createMOI')
def create_moi(self, request, **kwargs):
global moi_object_serializer
data = JSONParser().parse(request)
response_data = {'status': 'OperationFailed'}
moi_serializer_class = globals()[kwargs['className'] + "Serializer"]
moi_object_serializer = moi_serializer_class(data=data['attributeListIn'])
if moi_object_serializer.is_valid():
moi_object_serializer.save()
response_data = {'status': 'OperationSucceeded',
'attributeListOut': moi_object_serializer.data}
return JsonResponse(response_data, status=201)
return JsonResponse(response_data, status=400)
@csrf_exempt
    @action(detail=True, methods=['GET'], name='getMOIAttributes')
def get_moi_attributes(self, request, **kwargs):
global moi_object
global moi_object_serializer
response_data = {'status': 'OperationFailed'}
scope_seq = ast.literal_eval(request.query_params.get('scope'))
moi_model_class = globals()[kwargs['className']]
moi_serializer_class = globals()[kwargs['className'] + "Serializer"]
if not Scope.has_value(scope_seq[0]):
return JsonResponse(response_data, status=400)
moi_serializer_class.Meta.depth = get_scope_level(scope_seq[0], scope_seq[1])
try:
if kwargs['id'] == "*" and request.query_params.get('filter') is None:
moi_object = moi_model_class.objects.all()
elif request.query_params.get('filter') is None or \
request.query_params.get('filter') == "":
_id = kwargs['id'].replace('-', '')
moi_object = moi_model_class.objects.extra(
where=[moi_model_class._meta.pk.verbose_name + "='" + _id + "'"])
else:
moi_object = moi_model_class.objects.extra(
where=[request.query_params.get('filter')])
moi_object_serializer = moi_serializer_class(moi_object, many=True)
except moi_model_class.DoesNotExist:
return JsonResponse(response_data, status=400)
response_data = {'status': 'OperationSucceeded',
'attributeListOut': moi_object_serializer.data}
return JsonResponse(response_data, status=200)
# return response_cors(request.method, JsonResponse(response_data, status=100))
@csrf_exempt
    @action(detail=True, methods=['PATCH'], name='modifyMOIAttributes')
def modify_moi_attributes(self, request, **kwargs):
global moi_object
global moi_object_serializer
response_data = {'status': 'OperationFailed'}
scope_seq = ast.literal_eval(request.query_params.get('scope'))
moi_model_class = globals()[kwargs['className']]
moi_serializer_class = globals()[kwargs['className'] + "Serializer"]
if not Scope.has_value(scope_seq[0]):
return JsonResponse(response_data, status=400)
moi_serializer_class.Meta.depth = get_scope_level(scope_seq[0], scope_seq[1])
if len(request.data['modificationList']) == 2 and not Modification.has_value(
request.data['modificationList'][1]):
return JsonResponse(response_data, status=400)
elif len(request.data['modificationList']) == 3 and not Modification.has_value(
request.data['modificationList'][2]):
return JsonResponse(response_data, status=400)
try:
if kwargs['id'] == "*" and request.query_params.get('filter') is None:
moi_object = moi_model_class.objects.all()
elif request.query_params.get('filter') is None or \
request.query_params.get('filter') == "":
_id = kwargs['id'].replace('-', '')
moi_object = moi_model_class.objects.extra(
where=[moi_model_class._meta.pk.verbose_name + "='" + _id + "'"])
else:
moi_object = moi_model_class.objects.extra(
where=[request.query_params.get('filter')])
            operator_index = len(request.data['modificationList']) - 1
            update(moi_object, request.data, request.data['modificationList'][operator_index])
            # Serialize after the update so the response reflects the new values.
            moi_object_serializer = moi_serializer_class(moi_object, many=True)
            response_data = {'status': 'OperationSucceeded',
                             'attributeListOut': moi_object_serializer.data}
except moi_model_class.DoesNotExist:
return JsonResponse(response_data, status=400)
except Exception as ex:
print(ex)
return JsonResponse(response_data, status=400)
return JsonResponse(response_data, status=200)
@csrf_exempt
    @action(detail=True, methods=['delete'], name='deleteMOI')
def delete_moi(self, request, **kwargs):
global moi_object
global moi_object_serializer
response_data = {'status': 'OperationFailed'}
scope_seq = ast.literal_eval(request.query_params.get('scope'))
moi_model_class = globals()[kwargs['className']]
moi_serializer_class = globals()[kwargs['className'] + "Serializer"]
if not Scope.has_value(scope_seq[0]):
return JsonResponse(response_data, status=400)
moi_serializer_class.Meta.depth = get_scope_level(scope_seq[0], scope_seq[1])
try:
if kwargs['id'] == "*" and request.query_params.get('filter') is None:
moi_object = moi_model_class.objects.all()
elif request.query_params.get('filter') is None or \
request.query_params.get('filter') == "":
_id = kwargs['id'].replace('-', '')
moi_object = moi_model_class.objects.extra(
where=[moi_model_class._meta.pk.verbose_name + "='" + _id + "'"])
else:
moi_object = moi_model_class.objects.extra(
where=[request.query_params.get('filter')])
moi_object_serializer = moi_serializer_class(moi_object, many=True)
except moi_model_class.DoesNotExist:
return JsonResponse(response_data, status=400)
response_data = {'status': 'OperationSucceeded', 'deletionList': moi_object_serializer.data}
moi_object.delete()
return JsonResponse(response_data, status=200)
def subscribe_moi(self):
pass
def unsubscribe_moi(self):
pass
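# Illustrative URLconf wiring for ObjectManagement (hypothetical URL pattern;
# the @action handlers above expect `className` and `id` kwargs):
#
#     urlpatterns = [
#         path('ObjectManagement/<str:className>/<str:id>',
#              ObjectManagement.as_view({'put': 'create_moi',
#                                        'get': 'get_moi_attributes',
#                                        'patch': 'modify_moi_attributes',
#                                        'delete': 'delete_moi'})),
#     ]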
def get_scope_level(level_selection, level):
if level_selection == 'BASE_ONLY':
return 0
elif level_selection == 'BASE_NTH_LEVEL':
return level
elif level_selection == 'BASE_SUBTREE':
return level + 1
    elif level_selection == 'BASE_ALL':
        return 10
    # Defensive fallback: treat an unrecognised selection as BASE_ALL rather
    # than implicitly returning None, which would break Meta.depth.
    return 10
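# Example scope translations (illustrative):
#   get_scope_level('BASE_ONLY', 3)      -> 0   (just the base object)
#   get_scope_level('BASE_NTH_LEVEL', 2) -> 2
#   get_scope_level('BASE_SUBTREE', 2)   -> 3
#   get_scope_level('BASE_ALL', 0)       -> 10  (full depth)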
def response_cors(method_type, response):
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = method_type
response["Access-Control-Max-Age"] = "1000"
response["Access-Control-Allow-Headers"] = "*"
return response
def update(moi_object_data, data, operator):
    global modification_list
    global moi_object_list
    if operator != 'SET_TO_DEFAULT':
        modification_list = dict()
        for i in range(len(data['modificationList'][0])):
            modification_list[data['modificationList'][0][i]] = data['modificationList'][1][i]
    if operator == 'REPLACE':
        moi_object_data.update(**modification_list)
    elif operator == 'ADD_VALUES':
        moi_object_list = [moi for moi in moi_object_data]
        for moi in moi_object_list:
            for j in range(len(data['modificationList'][0])):
                field_name = data['modificationList'][0][j]
                # The related model class is named after the field, capitalised.
                model_class_name = field_name[0].upper() + field_name[1:]
                getattr(moi, field_name).add(
                    globals()[model_class_name].objects.get(pk=data['modificationList'][1][j]))
    elif operator == 'REMOVE_VALUES':
        moi_object_list = [moi for moi in moi_object_data]
        for moi in moi_object_list:
            for j in range(len(data['modificationList'][0])):
                field_name = data['modificationList'][0][j]
                model_class_name = field_name[0].upper() + field_name[1:]
                getattr(moi, field_name).remove(
                    globals()[model_class_name].objects.get(pk=data['modificationList'][1][j]))
    elif operator == 'SET_TO_DEFAULT':
        # Reset each listed attribute to its model-declared default value.
        for i in range(len(data['modificationList'][0])):
            default_value = moi_object_data.model._meta.get_field(
                data['modificationList'][0][i]).default
            moi_object_data.update(**{data['modificationList'][0][i]: default_value})
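# Illustrative modificationList shapes consumed by update() (attribute names
# are hypothetical; the last element is always the operator):
#   REPLACE:        [['administrativeState'], ['LOCKED'], 'REPLACE']
#   ADD_VALUES:     [['snssaiList'], [3], 'ADD_VALUES']   # pk of a related row
#   SET_TO_DEFAULT: [['administrativeState'], 'SET_TO_DEFAULT']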
class SubscriptionView(MultipleSerializerViewSet):
""" Subscription Information
"""
queryset = Subscription.objects.all()
serializer_class = MultipleSerializerViewSet.get_serializer_class
thread_pool = dict()
def list(self, request, *args, **kwargs):
"""
Query Subscription information.
The GET method queries the information of the Subscription matching the filter.
"""
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
"""
Create a new individual Subscription resource.
The POST method creates a new individual Subscription resource.
"""
response = super().create(request, *args, **kwargs)
for notification in request.data['filter']:
notify_obj = CommonNotification.objects.get(notificationId=notification)
            notify_data = {
                "notificationId": notify_obj.notificationId,
                "notificationType": notify_obj.notificationType,
                # literal_eval is safer than eval() for the stored repr strings
                "objectInstanceInfos": ast.literal_eval(notify_obj.objectInstanceInfos),
                "objectClass": notify_obj.objectClass,
                "additionalText": ast.literal_eval(notify_obj.additionalText)
            }
self.thread_pool[notification] = TaskThread(request.data['timeTick'],
request.data['callbackUri'],
notify_data)
self.thread_pool[notification].start()
return response
def retrieve(self, request, *args, **kwargs):
"""
Read information about an individual Subscription resource.
The GET method reads the information of a Subscription.
"""
return super().retrieve(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
"""
Update information about an individual Subscription resource.
The PATCH method updates the information of a Subscription.
"""
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
"""
Delete an individual Subscription.
The DELETE method deletes an individual Subscription resource.
"""
for notification in self.get_object().filter.all():
if str(notification.notificationId) in self.thread_pool:
task = self.thread_pool[str(notification.notificationId)]
task.stop = True
return super().destroy(request, *args, **kwargs)
class NotificationView(ModelViewSet):
""" Notification Information
"""
queryset = CommonNotification.objects.all()
serializer_class = CommonNotificationSerializer
def check(self, request):
# Check objectInstanceInfos (type: list) is exist in other CommonNotification
notify_queryset = \
self.queryset.filter(notificationType=request.data['notificationType'])
for query in request.data['objectInstanceInfos']:
notify_queryset = notify_queryset.filter(
objectInstanceInfos__contains=query)
# Check additionalText (type: dict) is exist in other CommonNotification
for query in notify_queryset:
            if ast.literal_eval(query.additionalText) == request.data['additionalText']:
return notify_queryset
notify_queryset = list()
return notify_queryset
def list(self, request, *args, **kwargs):
"""
Query Notification information.
The GET method queries the information of the Notification matching the filter.
"""
response = list()
queryset = self.filter_queryset(self.get_queryset())
for query in queryset.values():
            query['objectInstanceInfos'] = ast.literal_eval(query['objectInstanceInfos'])
            query['additionalText'] = ast.literal_eval(query['additionalText'])
response.append(query)
return Response(response)
def create(self, request, *args, **kwargs):
"""
Create a new individual Notification resource.
The POST method creates a new individual Notification resource.
"""
response = {'status': OperationStatus.OperationFailedExistingSubscription}
notify_queryset = self.check(request)
if notify_queryset:
return Response(response, status=500)
request.data['objectInstanceInfos'] = str(request.data['objectInstanceInfos'])
request.data['additionalText'] = str(request.data['additionalText'])
super().create(request, *args, **kwargs)
return Response(status=201)
def retrieve(self, request, *args, **kwargs):
"""
Read information about an individual Notification resource.
The GET method reads the information of a Notification.
"""
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
def update(self, request, *args, **kwargs):
"""
Update information about an individual Notification resource.
The PATCH method updates the information of a Notification.
"""
response = {'status': OperationStatus.OperationFailedExistingSubscription}
notify_queryset = self.check(request)
if notify_queryset:
return Response(response, status=500)
request.data['objectInstanceInfos'] = str(request.data['objectInstanceInfos'])
request.data['additionalText'] = str(request.data['additionalText'])
super().update(request, *args, **kwargs)
return Response(status=200)
def destroy(self, request, *args, **kwargs):
"""
Delete an individual Notification.
The DELETE method deletes an individual Notification resource.
"""
return super().destroy(request, *args, **kwargs)
class TopologyView(GenericViewSet):
""" Topology Information
"""
queryset = NetworkSliceSubnet.objects.all()
serializer_class = NetworkSliceSubnetTopologySerializer
def list(self, request):
"""
Query Topology information.
The GET method queries the information of the Topology matching the filter.
"""
queryset = self.filter_queryset(self.get_queryset())
serializer = self.serializer_class(queryset, many=True)
nsinfo_object = serializer.data
response_list = list()
link_count = 0
for element in nsinfo_object:
response = {
"links": [],
"nodes": []
}
if element['nsInfo']:
# Consist topology Network Slice Subnet Instance
                if element['nsInfo']['nsInstanceName'] is not None:
response['nodes'].append({
"id": element['nssiId'],
"name": element['nsInfo']['nsInstanceName'],
"symbolSize": 10,
"symbol": "roundRect",
"attributes": {
"modularity_class": 0
}
})
else:
response['nodes'].append({
"id": element['nssiId'],
"name": element['nsInfo']['nsInstanceDescription'],
"symbolSize": 10,
"symbol": "roundRect",
"attributes": {
"modularity_class": 0
}
})
                nsinfo = ast.literal_eval(element['nsInfo']['vnfInstance'])
if 'nsInstanceName' in nsinfo:
for _ in nsinfo:
addresses = str()
cp_id = str()
                        vnf_state = _['instantiatedVnfInfo']['vnfState']
for extCpInfo in _['instantiatedVnfInfo']['extCpInfo']:
cp_id = extCpInfo['id']
cp_protocol_info = extCpInfo['cpProtocolInfo']
ip_over_ethernet = cp_protocol_info[0]['ipOverEthernet']
ip_addresses = ip_over_ethernet['ipAddresses']
if ip_addresses[0]['isDynamic']:
addresses = ip_addresses[0]['addresses']
# Consist topology VNF Instance
response['nodes'].append({
"id": _['id'],
"name": _['vnfProductName'],
"instantiationState": _['instantiationState'],
"vnfState": vnf_state,
"symbolSize": 10,
"symbol": "triangle",
"attributes": {
"modularity_class": 1
},
"address": addresses
})
# Consist topology relation VNF Instance <-> Network Service Instance
response['links'].append({
"id": str(link_count),
"source": element['nssiId'],
"target": _['id']
})
link_count += 1
else:
print('Tacker Topology')
for _ in nsinfo:
response['nodes'].append({
"id": nsinfo[_],
"name": _,
"instantiationState": None,
"vnfState": None,
"symbolSize": 10,
"symbol": "image://https://lh3.googleusercontent.com/p-qlD6cG49XFnGtZVmrtr7TfmdEjMSkBYkVZvl_Al6xC1pK87EGDUhoo2EcJBHY6DKIPLE8P9PxqF_ps1AFnu4P5DSFQdbEAUd_QYbzmF_Iu1Xs7XZ3umSpDD3VibL3fKJ9GicqQew=s315-p-k",
"attributes": {
"modularity_class": 1
},
"address": None
})
response['links'].append({
"id": str(link_count),
"source": element['nssiId'],
"target": nsinfo[_]
})
link_count += 1
response_list.append(response)
return response_cors(request.method, JsonResponse(response_list, safe=False))
def retrieve(self, request, *args, **kwargs):
"""
Read information about an individual Topology resource.
The GET method reads the information of a Topology.
"""
instance = self.get_object()
serializer = self.get_serializer(instance)
nsinfo_object = serializer.data
response = {
"links": [],
"nodes": []
}
link_count = 0
if nsinfo_object['nsInfo']:
            if nsinfo_object['nsInfo']['nsInstanceName'] is not None:
response['nodes'].append({
"id": nsinfo_object['nssiId'],
"name": nsinfo_object['nsInfo']['nsInstanceName'],
"symbolSize": 10,
"symbol": "roundRect",
"attributes": {
"modularity_class": 0
}
})
else:
response['nodes'].append({
"id": nsinfo_object['nssiId'],
"name": nsinfo_object['nsInfo']['nsInstanceDescription'],
"symbolSize": 10,
"symbol": "roundRect",
"attributes": {
"modularity_class": 0
}
})
            nsinfo = ast.literal_eval(nsinfo_object['nsInfo']['vnfInstance'])
            if 'nsInstanceName' in nsinfo:
                # Map each known VNF product name to its topology icon URL;
                # unknown VNFs fall back to a plain triangle so the link
                # appended below never points at a node that was not added.
                vnf_symbols = {
                    "upf": "image://https://lh3.googleusercontent.com/tDq5MNIeqdyUibCoHGUFhFvTi5JSM6-PZ6qec5_yBrGx0fBELl0tYrnWcvOn3TpLeWzcP-qxISW9BHYvFkF6CMREi-tJcmO2eMxLTgPvSBSYX8MZWWjNJd6WFQF-iEXW7oWy476RVA=w2400",
                    "hss": "image://https://lh3.googleusercontent.com/6D1Mz9o4gBb4g-MSN4p0mKCWz4-PXk-K_ZkAcTQLR1YUyS_PCX-pORu6X9uyRJ_Ve1GlBX4ZL2Bb00sdymga2jRcCOG3nPPVte4JBeoW8cQxaju4BuNFhSkKAeXB0OxYW2HUVEXSHg=w2400",
                    "amf": "image://https://lh3.googleusercontent.com/4lyf2bW5dKmJ9ygvXL6a9nd8SNP1RABQtcsS6nFLWyeb3W3y27gay4ujcPmmFhKj737C60IZoUcfnn8eUosl_h_qQoIMnQJmbssSwkQ4I3rC8lReRSfcZjuGbj8Xpgpb9PS2nvYWew=w2400",
                    "smf": "image://https://lh3.googleusercontent.com/i5IXbaptWGdvp74uIEzVfN6nMu6YmblUSA_iPy68sD1FOL31VZuvuE0RQB83iQ-CxptFdkM-ku1ey7tSVzaro2jjIBTIOOpfNzEQA_f84YeJwbP1Fwr7xJOB_r6Tls99c5iOO3WAPg=w2400",
                    "mongodb": "image://https://lh3.googleusercontent.com/xwIxuvxi4_wFwaMEw8mi2FqpI1K_SCGy1DGQsedj-aYAfPvjkEtYFmjKu_nrBJck-WxhcJifG6QdC4PY5jdqt3zkIER058P0f1QLS-rvdwOeOkmz9OaZeRLppwd4k3YyJgl36aiq-A=w2400",
                    "webui": "image://https://lh3.googleusercontent.com/_p7-L94qZANyy4rN_ygdTHYJU7aYLwAUok5EY5VhdbJhESShkMAcctymFhYzX3nM9MccqG2hJLrJ1618ZMz2fWefgz0_RTPl8LWvhb3eNoziJpHHwTai0t8xymvS3JRmjFGuqoFJQA=w2400",
                    "pcrf": "image://https://lh3.googleusercontent.com/dp_CZZ6Nn3AKb1WUsawXnvWcIWxz4iXvn4vx0wGjidaV1wSTld3CPfGAZTc-8RqLIX-xeodWKAzuHO5btB37PMbJFYu3J7cwuXD2ya2w0U9D4bIazhK4SrABzr8x-8wRHkz0iI_1fA=w2400",
                    "ausf": "image://https://lh3.googleusercontent.com/-R2wrpgEVBdXZfpujbLQkhuIWgaHQi4Vka-BLLDxzX8J4XYNRF-HJx3TsAoBXQHuskFJveYx9v1lQij37730EJKUraPlR2mWYt7OLoa8m1bmH2coQzAN2WGGo3htq6GdJyNkJHLUnA=s314-p-k",
                    "nssf": "image://https://lh3.googleusercontent.com/mYCmjpPMPhdWG_34KLVqEeTUM5DXQ8u1EK4lMCyiXaa4W-fCioeNgxbgzuQS8j-vcCn6Cnh2r7zGNNpdnAA3VjzgPykGrHbPCvM3NfMzgxf_1lW379FEkcOjqNMa1QzVUSHEam2ykw=s314-p-k",
                    "udm": "image://https://lh3.googleusercontent.com/iSgkP7Bfr0HWSSUkEt521Ka6rhbn9PmbNUb_fy0Ck3KD4Vn0YbV5egWqaqfGfvMk87DLpLywheYa6BBzkeffMfJNdFRkbr3nBTd-kJRyEp0Dl29egXQz9Kkr-WeFO3CslXxX1cnJHw=s314-p-k",
                    "udr": "image://https://lh3.googleusercontent.com/ygnGeIaEUK7Y4e5T96FBOgfWehLURMaIH6Ev_PKoOv1MbDZoH0lM_cHNkskRo9C1CpsMWsgqYaKuvk-xO-X0GtxNNZKphkaicPfztQhkzV_vZdndvrfQZIanbcALElNWroEHwef2yg=s314-p-k",
                }
                for _ in nsinfo:
                    addresses = str()
                    cp_id = str()
                    vnf_state = _['instantiatedVnfInfo']['vnfState']
                    for extCpInfo in _['instantiatedVnfInfo']['extCpInfo']:
                        cp_id = extCpInfo['id']
                        cp_protocol_info = extCpInfo['cpProtocolInfo']
                        ip_over_ethernet = cp_protocol_info[0]['ipOverEthernet']
                        ip_addresses = ip_over_ethernet['ipAddresses']
                        if ip_addresses[0]['isDynamic']:
                            addresses = ip_addresses[0]['addresses']
                    # Consist topology VNF Instance
                    response['nodes'].append({
                        "id": _['id'],
                        "name": _['vnfProductName'],
                        "instantiationState": _['instantiationState'],
                        "vnfState": vnf_state,
                        "symbolSize": 10,
                        "symbol": vnf_symbols.get(_['vnfProductName'], "triangle"),
                        "attributes": {
                            "modularity_class": 1
                        },
                        "address": addresses
                    })
# Consist topology relation VNF Instance <-> Network Service Instance
response['links'].append({
"id": str(link_count),
"source": nsinfo_object['nssiId'],
"target": _['id']
})
link_count += 1
else:
print('Tacker Topology')
for _ in nsinfo:
response['nodes'].append({
"id": nsinfo[_],
"name": _,
"instantiationState": None,
"vnfState": None,
"symbolSize": 10,
"symbol": "image://https://lh3.googleusercontent.com/p-qlD6cG49XFnGtZVmrtr7TfmdEjMSkBYkVZvl_Al6xC1pK87EGDUhoo2EcJBHY6DKIPLE8P9PxqF_ps1AFnu4P5DSFQdbEAUd_QYbzmF_Iu1Xs7XZ3umSpDD3VibL3fKJ9GicqQew=s315-p-k",
"attributes": {
"modularity_class": 1
},
"address": None
})
response['links'].append({
"id": str(link_count),
"source": nsinfo_object['nssiId'],
"target": nsinfo[_]
})
link_count += 1
return response_cors(request.method, JsonResponse(response))
| 46.797176
| 230
| 0.531751
|
3d330abf56e025d08920aa71431c0e86b461b87d
| 1,712
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/pyeapi/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pyeapi/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/pyeapi/__init__.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__version__ = '0.6.1'
__author__ = 'Arista EOS+'
from .client import load_config, connect, connect_to, config_for
__all__ = ['load_config', 'connect', 'connect_to', 'config_for']
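# Minimal usage sketch (illustrative; assumes an eapi.conf defining a
# connection named 'veos01'):
#
#     import pyeapi
#     node = pyeapi.connect_to('veos01')
#     print(node.enable('show version'))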
| 43.897436
| 74
| 0.773949
|
faa20962e7312a7f6da1557733d0d24548d43575
| 1,520
|
py
|
Python
|
geba_website/apps/forum/migrations/0001_initial.py
|
GeoffBarrett/geba_website
|
a8b520f6540200b4d085e93a3ac9ec766fd82af5
|
[
"MIT"
] | null | null | null |
geba_website/apps/forum/migrations/0001_initial.py
|
GeoffBarrett/geba_website
|
a8b520f6540200b4d085e93a3ac9ec766fd82af5
|
[
"MIT"
] | 15
|
2020-02-12T00:00:38.000Z
|
2022-03-11T23:43:44.000Z
|
geba_website/apps/forum/migrations/0001_initial.py
|
GeoffBarrett/geba_website
|
a8b520f6540200b4d085e93a3ac9ec766fd82af5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-09-28 02:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ForumPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('vote_score', models.FloatField(db_index=True, default=0)),
('num_vote_up', models.PositiveIntegerField(db_index=True, default=0)),
('num_vote_down', models.PositiveIntegerField(db_index=True, default=0)),
('slug', models.SlugField(unique=True)),
('publish_date', models.DateTimeField(blank=True, null=True)),
('title', models.CharField(max_length=200)),
('draft', models.BooleanField(default=False)),
('body', models.TextField()),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-vote_score', '-num_vote_up', '-publish_date', '-modified'],
},
),
]
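# Apply with the usual migrate command (illustrative; app label assumed from
# the path above): python manage.py migrate forum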
| 40
| 131
| 0.601974
|
97381fd044ba5b1042987956aa91365e7f1643c0
| 54
|
py
|
Python
|
etabotsite/etabotapp/__init__.py
|
koykub333/pmp
|
b92d81da5f572cba868ebe7fe36b277138467bfc
|
[
"Apache-2.0"
] | 3
|
2018-07-05T00:04:59.000Z
|
2021-03-08T03:04:44.000Z
|
etabotsite/etabotapp/__init__.py
|
koykub333/pmp
|
b92d81da5f572cba868ebe7fe36b277138467bfc
|
[
"Apache-2.0"
] | 53
|
2018-06-04T06:09:23.000Z
|
2020-06-05T18:25:39.000Z
|
etabotsite/etabotapp/__init__.py
|
koykub333/pmp
|
b92d81da5f572cba868ebe7fe36b277138467bfc
|
[
"Apache-2.0"
] | 9
|
2018-04-15T19:42:35.000Z
|
2021-03-08T02:51:47.000Z
|
default_app_config = 'etabotapp.apps.EtabotappConfig'
| 27
| 53
| 0.851852
|
b95b059c5ec40bbb0515cd88920cf377200e9e02
| 339
|
py
|
Python
|
simp-py-code/max_num.py
|
Michaeloye/python-journey
|
ff8ce0e8796a129994f9a9a9dbb80340fa5790dc
|
[
"MIT"
] | null | null | null |
simp-py-code/max_num.py
|
Michaeloye/python-journey
|
ff8ce0e8796a129994f9a9a9dbb80340fa5790dc
|
[
"MIT"
] | null | null | null |
simp-py-code/max_num.py
|
Michaeloye/python-journey
|
ff8ce0e8796a129994f9a9a9dbb80340fa5790dc
|
[
"MIT"
] | null | null | null |
num1 = int(input("Enter a number:"))
num2 = int(input("Enter a number:"))
num3 = int(input("Enter a number:"))
def max_num(num1, num2, num3):
    # Return the largest of the three numbers.
    if num1 >= num2 and num1 >= num3:
        return num1
    elif num2 >= num1 and num2 >= num3:
        return num2
    return num3
print(max_num(num1, num2, num3))
| 24.214286
| 35
| 0.610619
|
f2b5b88e8fe89da14eb90a81ffe9e34d1bfb05c7
| 2,161
|
py
|
Python
|
data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027
|
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496
|
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
data/migrations/versions/67f0abd172ae_add_tagtorepositorytag_table.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249
|
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
"""
Add TagToRepositoryTag table.
Revision ID: 67f0abd172ae
Revises: 10f45ee2310b
Create Date: 2018-10-30 11:31:06.615488
"""
# revision identifiers, used by Alembic.
revision = "67f0abd172ae"
down_revision = "10f45ee2310b"
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(op, tables, tester):
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"tagtorepositorytag",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("repository_id", sa.Integer(), nullable=False),
sa.Column("tag_id", sa.Integer(), nullable=False),
sa.Column("repository_tag_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["repository_id"],
["repository.id"],
name=op.f("fk_tagtorepositorytag_repository_id_repository"),
),
sa.ForeignKeyConstraint(
["repository_tag_id"],
["repositorytag.id"],
name=op.f("fk_tagtorepositorytag_repository_tag_id_repositorytag"),
),
sa.ForeignKeyConstraint(
["tag_id"], ["tag.id"], name=op.f("fk_tagtorepositorytag_tag_id_tag")
),
sa.PrimaryKeyConstraint("id", name=op.f("pk_tagtorepositorytag")),
)
op.create_index(
"tagtorepositorytag_repository_id", "tagtorepositorytag", ["repository_id"], unique=False
)
op.create_index(
"tagtorepositorytag_repository_tag_id",
"tagtorepositorytag",
["repository_tag_id"],
unique=True,
)
op.create_index("tagtorepositorytag_tag_id", "tagtorepositorytag", ["tag_id"], unique=True)
# ### end Alembic commands ###
tester.populate_table(
"tagtorepositorytag",
[
("repository_id", tester.TestDataType.Foreign("repository")),
("tag_id", tester.TestDataType.Foreign("tag")),
("repository_tag_id", tester.TestDataType.Foreign("repositorytag")),
],
)
def downgrade(op, tables, tester):
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("tagtorepositorytag")
# ### end Alembic commands ###
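# To apply up to this revision with plain Alembic (illustrative; quay drives
# these migrations through its own wrapper): alembic upgrade 67f0abd172ae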
| 32.742424
| 97
| 0.64137
|
12fbc31f51bba1dd741ddcb8e7d41feee016ab05
| 976
|
bzl
|
Python
|
closure/closure_proto_library.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | 1
|
2020-08-15T06:02:47.000Z
|
2020-08-15T06:02:47.000Z
|
closure/closure_proto_library.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | null | null | null |
closure/closure_proto_library.bzl
|
kalbasit/rules_proto_grpc
|
7e0a97adc8801df1cd74ee435d74bbd857c98a36
|
[
"Apache-2.0"
] | null | null | null |
load("//closure:closure_proto_compile.bzl", "closure_proto_compile")
load("@io_bazel_rules_closure//closure:defs.bzl", "closure_js_library")
def closure_proto_library(**kwargs):
# Compile protos
name_pb = kwargs.get("name") + "_pb"
closure_proto_compile(
name = name_pb,
**{k: v for (k, v) in kwargs.items() if k in ("deps", "verbose")} # Forward args
)
# Create closure library
closure_js_library(
name = kwargs.get("name"),
srcs = [name_pb],
deps = PROTO_DEPS,
visibility = kwargs.get("visibility"),
suppress = [
"JSC_LATE_PROVIDE_ERROR",
"JSC_UNDEFINED_VARIABLE",
"JSC_IMPLICITLY_NULLABLE_JSDOC",
"JSC_STRICT_INEXISTENT_PROPERTY",
"JSC_POSSIBLE_INEXISTENT_PROPERTY",
"JSC_UNRECOGNIZED_TYPE_ERROR",
"JSC_TYPE_MISMATCH",
],
)
PROTO_DEPS = [
"@io_bazel_rules_closure//closure/protobuf:jspb"
]
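# Example BUILD usage (illustrative target names):
#
#     load("//closure:closure_proto_library.bzl", "closure_proto_library")
#
#     closure_proto_library(
#         name = "example_closure_proto",
#         deps = [":example_proto"],
#     )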
| 30.5
| 88
| 0.620902
|
60fd13a4d084b2af13fc729f02aa11dafa5ae06d
| 5,444
|
py
|
Python
|
steam/client/builtins/user.py
|
XronoZ-create/steam
|
3330d37cc2670faaaaf4d3e14c7c2b20712f0853
|
[
"MIT"
] | 1
|
2022-01-11T20:35:05.000Z
|
2022-01-11T20:35:05.000Z
|
steam/client/builtins/user.py
|
XronoZ-create/steam
|
3330d37cc2670faaaaf4d3e14c7c2b20712f0853
|
[
"MIT"
] | null | null | null |
steam/client/builtins/user.py
|
XronoZ-create/steam
|
3330d37cc2670faaaaf4d3e14c7c2b20712f0853
|
[
"MIT"
] | null | null | null |
from weakref import WeakValueDictionary
from steam.client.user import SteamUser
from steam.enums import EPersonaState, EChatEntryType, EType, EClientUIMode
from steam.enums.emsg import EMsg
from steam.core.msg import MsgProto
from steam.utils.proto import proto_fill_from_dict
class User(object):
EVENT_CHAT_MESSAGE = 'chat_message'
"""On new private chat message
:param user: steam user
:type user: :class:`.SteamUser`
:param message: message text
:type message: str
"""
persona_state = EPersonaState.Online #: current persona state
user = None #: :class:`.SteamUser` instance once logged on
current_games_played = [] #: :class:`list` of app ids currently being played
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self._user_cache = WeakValueDictionary()
self.on(self.EVENT_DISCONNECTED, self.__handle_disconnect)
self.on(self.EVENT_LOGGED_ON, self.__handle_set_persona)
self.on(EMsg.ClientPersonaState, self.__handle_persona_state)
self.on(EMsg.ClientFriendMsgIncoming, self.__handle_message_incoming)
self.on("FriendMessagesClient.IncomingMessage#1", self.__handle_message_incoming2)
def __handle_message_incoming(self, msg):
# old chat
if msg.body.chat_entry_type == EChatEntryType.ChatMsg:
user = self.get_user(msg.body.steamid_from)
self.emit("chat_message", user, msg.body.message.decode('utf-8'))
def __handle_message_incoming2(self, msg):
# new chat
if msg.body.chat_entry_type == EChatEntryType.ChatMsg:
user = self.get_user(msg.body.steamid_friend)
self.emit("chat_message", user, msg.body.message)
def __handle_disconnect(self):
self.user = None
self.current_games_played = []
def __handle_set_persona(self):
self.user = self.get_user(self.steam_id, False)
if self.steam_id.type == EType.Individual and self.persona_state != EPersonaState.Offline:
self.change_status(persona_state=self.persona_state)
def __handle_persona_state(self, message):
for friend in message.body.friends:
steamid = friend.friendid
if steamid in self._user_cache:
suser = self._user_cache[steamid]
suser._pstate = friend
suser._pstate_ready.set()
def change_status(self, **kwargs):
"""
Set name, persona state, flags
.. note::
Changing persona state will also change :attr:`persona_state`
:param persona_state: persona state (Online/Offline/Away/etc)
:type persona_state: :class:`.EPersonaState`
:param player_name: profile name
:type player_name: :class:`str`
:param persona_state_flags: persona state flags
:type persona_state_flags: :class:`.EPersonaStateFlag`
"""
if not kwargs: return
self.persona_state = kwargs.get('persona_state', self.persona_state)
message = MsgProto(EMsg.ClientChangeStatus)
proto_fill_from_dict(message.body, kwargs)
self.send(message)
def request_persona_state(self, steam_ids, state_flags=863):
"""Request persona state data
:param steam_ids: list of steam ids
:type steam_ids: :class:`list`
:param state_flags: client state flags
:type state_flags: :class:`.EClientPersonaStateFlag`
"""
m = MsgProto(EMsg.ClientRequestFriendData)
m.body.persona_state_requested = state_flags
m.body.friends.extend(steam_ids)
self.send(m)
def get_user(self, steam_id, fetch_persona_state=True):
"""Get :class:`.SteamUser` instance for ``steam id``
:param steam_id: steam id
:type steam_id: :class:`int`, :class:`.SteamID`
:param fetch_persona_state: whether to request person state when necessary
:type fetch_persona_state: :class:`bool`
:return: SteamUser instance
:rtype: :class:`.SteamUser`
"""
steam_id = int(steam_id)
suser = self._user_cache.get(steam_id, None)
if suser is None:
suser = SteamUser(steam_id, self)
self._user_cache[steam_id] = suser
if fetch_persona_state:
self.request_persona_state([steam_id])
return suser
def games_played(self, app_ids):
"""
Set the apps being played by the user
:param app_ids: a list of application ids
:type app_ids: :class:`list`
These app ids will be recorded in :attr:`current_games_played`.
"""
if not isinstance(app_ids, list):
raise ValueError("Expected app_ids to be of type list")
self.current_games_played = app_ids = list(map(int, app_ids))
self.send(MsgProto(EMsg.ClientGamesPlayed),
{'games_played': [{'game_id': app_id} for app_id in app_ids]}
)
def set_ui_mode(self, uimode):
"""
Set UI mode. Show little icon next to name in friend list. (e.g phone, controller, other)
:param uimode: UI mode integer
:type uimode: :class:`EClientUIMode`
These app ids will be recorded in :attr:`current_games_played`.
"""
self.send(MsgProto(EMsg.ClientCurrentUIMode), {'uimode': EClientUIMode(uimode)})
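# Minimal usage sketch (illustrative; `client` is a logged-on SteamClient,
# which mixes in this User class):
#
#     client.change_status(persona_state=EPersonaState.Away)
#     client.games_played([570])                  # appear in-game for app 570
#     client.set_ui_mode(EClientUIMode.BigPicture)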
| 36.536913
| 98
| 0.650073
|
82cb29a4f09284b9aa216298d7389de3f483c9e8
| 1,479
|
py
|
Python
|
vqe.py
|
HPQC-LABS/Quantum-Graph-Spectra
|
b897d94dd03c48ffec5735b3dc5b86f8c3ab5a8f
|
[
"MIT"
] | 1
|
2020-07-29T06:42:32.000Z
|
2020-07-29T06:42:32.000Z
|
vqe.py
|
HPQC-LABS/Quantum-Graph-Spectra
|
b897d94dd03c48ffec5735b3dc5b86f8c3ab5a8f
|
[
"MIT"
] | null | null | null |
vqe.py
|
HPQC-LABS/Quantum-Graph-Spectra
|
b897d94dd03c48ffec5735b3dc5b86f8c3ab5a8f
|
[
"MIT"
] | 2
|
2021-03-29T13:40:47.000Z
|
2021-03-29T13:41:00.000Z
|
'''
@author: Josh Payne
Description: Implements VQE algorithm.
'''
import numpy as np
from scipy.optimize import minimize
from pyquil import Program, get_qc
from pyquil.gates import RX, RZ, CNOT
from pyquil.api import WavefunctionSimulator
from pyquil.paulis import PauliSum
sim = WavefunctionSimulator()
### QC/QVM ###
### input lattice name here ###
# qc = get_qc("Aspen-4-3Q-A")
### comment out if using Rigetti QCS ###
qc = get_qc("9q-generic-qvm")
def ansatz(params, num_layers, num_qubits):
program = Program()
for layer in range(num_layers):
for qubit in range(num_qubits):
program += RX(params[num_qubits*layer + qubit], qubit)
for qubit in range(num_qubits):
program += RZ(params[num_qubits*(layer+1) + qubit], qubit)
for qubit in range(num_qubits - 1):
program += CNOT(qubit, qubit+1)
return program
# Function calculating expectation value
def expectation(params, num_qubits, hamiltonian, num_layers):
program = ansatz(params, num_layers, num_qubits)
    wave = sim.expectation(program, hamiltonian)
# print(wave)
return wave
def solveVQE(hamiltonian: PauliSum, num_layers) -> float:
num_qubits = hamiltonian.get_qubits()
initial_params = np.random.uniform(low = 0, high = 2*np.pi, size = ((num_layers+1)*len(num_qubits),))
minimum = minimize(expectation, initial_params, method='Nelder-Mead', args=(len(num_qubits), hamiltonian, num_layers))
return minimum.fun
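# Illustrative call (hypothetical two-qubit Hamiltonian; requires a running
# QVM for the WavefunctionSimulator above):
#
#     from pyquil.paulis import sZ
#     print(solveVQE(sZ(0) + 0.5 * sZ(1), num_layers=2))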
| 32.866667
| 122
| 0.701149
|
4c6acdaac74e98b2945e442de531f93a4b9041f8
| 2,538
|
py
|
Python
|
Python/word-search-ii.py
|
bssrdf/LeetCode-5
|
746df5cff523361145a74d9d429dc541a7b99910
|
[
"MIT"
] | 68
|
2018-01-13T07:15:37.000Z
|
2022-02-20T12:58:24.000Z
|
Python/word-search-ii.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 2
|
2021-12-10T01:43:37.000Z
|
2021-12-14T21:48:53.000Z
|
Python/word-search-ii.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 63
|
2017-04-10T03:38:25.000Z
|
2022-03-17T23:24:51.000Z
|
# Time: O(m * n * l)
# Space: O(l)
#
# Given a 2D board and a list of words from the dictionary, find all words in the board.
#
# Each word must be constructed from letters of sequentially adjacent cell, where "adjacent" cells
# are those horizontally or vertically neighboring. The same letter cell may not be used more than once in a word.
#
# For example,
# Given words = ["oath","pea","eat","rain"] and board =
#
# [
# ['o','a','a','n'],
# ['e','t','a','e'],
# ['i','h','k','r'],
# ['i','f','l','v']
# ]
# Return ["eat","oath"].
# Note:
# You may assume that all inputs are consist of lowercase letters a-z.
#
class TrieNode(object):
# Initialize your data structure here.
def __init__(self):
self.is_string = False
self.leaves = {}
# Inserts a word into the trie.
def insert(self, word):
cur = self
for c in word:
if not c in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.is_string = True
class Solution(object):
def findWords(self, board, words):
"""
:type board: List[List[str]]
:type words: List[str]
:rtype: List[str]
"""
visited = [[False for j in xrange(len(board[0]))] for i in xrange(len(board))]
result = {}
trie = TrieNode()
for word in words:
trie.insert(word)
for i in xrange(len(board)):
for j in xrange(len(board[0])):
                self.findWordsRecu(board, trie, 0, i, j, visited, [], result)
return result.keys()
def findWordsRecu(self, board, trie, cur, i, j, visited, cur_word, result):
if not trie or i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or visited[i][j]:
return
if board[i][j] not in trie.leaves:
return
cur_word.append(board[i][j])
next_node = trie.leaves[board[i][j]]
if next_node.is_string:
result["".join(cur_word)] = True
visited[i][j] = True
self.findWordsRecu(board, next_node, cur + 1, i + 1, j, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i - 1, j, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i, j + 1, visited, cur_word, result)
self.findWordsRecu(board, next_node, cur + 1, i, j - 1, visited, cur_word, result)
visited[i][j] = False
cur_word.pop()
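# Example from the problem statement above:
#
#     board = [['o','a','a','n'],
#              ['e','t','a','e'],
#              ['i','h','k','r'],
#              ['i','f','l','v']]
#     print(Solution().findWords(board, ["oath", "pea", "eat", "rain"]))
#     # -> ['oath', 'eat'] (order not guaranteed)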
| 32.538462
| 114
| 0.553586
|
8f06d2dc0d768dbc99e7cc4b8d13a8cf7bc2d68b
| 3,688
|
py
|
Python
|
DQNPongExperiment.py
|
bosqmode/DQNPongExperiment
|
0f62780de7c210e63a01f26e47731e795eb7391f
|
[
"MIT"
] | null | null | null |
DQNPongExperiment.py
|
bosqmode/DQNPongExperiment
|
0f62780de7c210e63a01f26e47731e795eb7391f
|
[
"MIT"
] | null | null | null |
DQNPongExperiment.py
|
bosqmode/DQNPongExperiment
|
0f62780de7c210e63a01f26e47731e795eb7391f
|
[
"MIT"
] | null | null | null |
from DQNAgent import DQN
import numpy as np
import gym
import gym.spaces
import cv2
ACTIONS = 3
MAX_EPISODES = 99999999
TRAIN = True
WEIGHTS_NAME = "PONGW"
INPUT_SHAPE = (50,50,1)
CROPPING = (30, 45)
EXPREPLAY_CAPACITY = 20000
OBSERVEPERIOD = 100000
START_EPSILON = 1
MIN_EPSILON = 0.05
BATCH_SIZE = 64
GAMMA = 0.99
EPSILON_DECAY = 0.000005
TARGET_UPDATE_GAME_INTERVAL = 1
class PongExperiment:
def __init__(self):
self.agent = DQN(num_actions=ACTIONS, input_shape=INPUT_SHAPE, gamma = GAMMA, expreplaycap = EXPREPLAY_CAPACITY, batchsize = BATCH_SIZE, startepsilon = START_EPSILON, minepsilon = MIN_EPSILON, epsilondecay = EPSILON_DECAY, obsperiod=OBSERVEPERIOD)
self.env = gym.make('Pong-v0').unwrapped
self.render = False
def ProcessFrame(self, frame):
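        # Grayscale, crop the frame, downscale to the network input size,
        # binarize, then blend with the previous frame so the ball's motion
        # direction is visible in a single observation.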
img = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
img = img[CROPPING[0]:CROPPING[0]+img.shape[0]-CROPPING[1], 0:img.shape[1]-1]
img = cv2.resize(img, (INPUT_SHAPE[1], INPUT_SHAPE[0]), interpolation=cv2.INTER_CUBIC)
_,img = cv2.threshold(img,90,255,cv2.THRESH_BINARY)
if self.lastimg is None:
self.lastimg = img
img = cv2.addWeighted(img,1,self.lastimg,0.6,0)
self.lastimg = img
if self.render:
cv2.imshow('Input',cv2.resize(img, (500, 500), interpolation=cv2.INTER_CUBIC))
cv2.waitKey(20)
return (img/255)
def TrainEpisode(self, train):
self.lastimg = None
state = self.ProcessFrame(self.env.reset())
score = 0
while True:
action = self.agent.EpsilonGreedyAction(state)
newstate, reward, done, _ = self.env.step(action+1)
nextstate = self.ProcessFrame(newstate)
score += reward
self.agent.AddSample(state,action,reward,nextstate,done)
if done >= 1:
break
if train:
self.agent.Train()
state = nextstate
return score
def PlayEpisode(self, train):
self.lastimg = None
state = self.ProcessFrame(self.env.reset())
score = 0
while True:
action = self.agent.PredictAction(state)
newstate, reward, done, _ = self.env.step(action+1)
nextstate = self.ProcessFrame(newstate)
score += reward
if done >= 1:
break
state = nextstate
if self.render:
self.env.render()
return score
def Average(self, episodes):
avg = 0
for _ in range(episodes):
s = self.TrainEpisode(False)
avg += s
avg = avg/float(episodes)
return avg
def Train(self):
print("Starting training...")
self.render = False
for e in range(MAX_EPISODES):
episode_score = self.TrainEpisode(True)
self.agent.lastscore = episode_score
print("Game: {0}, Score: {1:.2f}".format(e, episode_score))
if e % 5 == 0:
average = self.Average(5)
self.agent.SaveWeights(WEIGHTS_NAME)
print("Average score over 5 games: {0}".format(average))
            if e % TARGET_UPDATE_GAME_INTERVAL == 0 and e != 0:
self.agent.UpdateTargetQ()
def Play(self):
self.agent.LoadWeights(WEIGHTS_NAME)
self.render = True
while True:
score = self.PlayEpisode(False)
print("Score {0:.2f}".format(score))
if __name__ == "__main__":
experiment = PongExperiment()
if TRAIN:
experiment.Train()
else:
experiment.Play()
| 29.741935
| 255
| 0.588666
|
1f17c00f13129561a5790b6393f3c9e23574dad9
| 119
|
py
|
Python
|
python-basic/logic.py
|
puppyhang/bash-collections
|
e105e55ae772d5d95ded98c72e49ebea6d6b8824
|
[
"Apache-2.0"
] | null | null | null |
python-basic/logic.py
|
puppyhang/bash-collections
|
e105e55ae772d5d95ded98c72e49ebea6d6b8824
|
[
"Apache-2.0"
] | null | null | null |
python-basic/logic.py
|
puppyhang/bash-collections
|
e105e55ae772d5d95ded98c72e49ebea6d6b8824
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
a = 1099
b = 109
if a == b:
print("a==b")
elif a > b:
print("a>b")
else:
print("a!=b")
| 10.818182
| 18
| 0.478992
|
8d9ca4713e16fef6d93add250add0478bae80913
| 472
|
py
|
Python
|
tests/test_news.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | 10
|
2020-04-09T06:34:48.000Z
|
2022-02-07T09:39:22.000Z
|
tests/test_news.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | null | null | null |
tests/test_news.py
|
rochi88/dshare
|
9dc46baff822be2ae7a7541fa10535a0299fbb5e
|
[
"MIT"
] | 5
|
2020-08-06T06:54:04.000Z
|
2021-09-06T12:28:23.000Z
|
# _*_ coding:utf-8 _*_
'''
Created on 2021-May-28
@author: Raisul Islam
'''
import unittest
import datetime as dt
from bdshare import get_agm_news, get_all_news
class Test(unittest.TestCase):
def test_get_agm_news(self):
df = get_agm_news()
print(df.to_string())
    def test_get_all_news(self):
        df = get_all_news('BATBC')
print(df.to_string())
if __name__ == "__main__":
unittest.main()
| 19.666667
| 46
| 0.65678
|
285e1418ce54e36d6e329e2587c831a7054fcbd2
| 1,499
|
py
|
Python
|
temp.py
|
abdallahabusedo/LinearRegressionEX
|
e58078e04eec78b7af7bac2f38bcf3ec28c6d3de
|
[
"MIT"
] | null | null | null |
temp.py
|
abdallahabusedo/LinearRegressionEX
|
e58078e04eec78b7af7bac2f38bcf3ec28c6d3de
|
[
"MIT"
] | null | null | null |
temp.py
|
abdallahabusedo/LinearRegressionEX
|
e58078e04eec78b7af7bac2f38bcf3ec28c6d3de
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20.0, 10.0)
# reading data
data = pd.read_csv("headbrain.csv")
print(data.shape)
data.head()
# collecting X & Y
X = data["Head Size(cm^3)"].values
Y = data["Brain Weight(grams)"].values
# Mean X & Y
MeanX = np.mean(X)
MeanY = np.mean(Y)
#total number of values
n= len(X)
# using the formula to calculate B1 and B0
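# Ordinary least squares:
#   B1 = sum((x_i - mean(X)) * (y_i - mean(Y))) / sum((x_i - mean(X))^2)
#   B0 = mean(Y) - B1 * mean(X)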
numer = 0
denom = 0
for i in range(n):
numer +=(X[i]-MeanX)*(Y[i]-MeanY)
denom +=(X[i]-MeanX)**2
B1 = numer /denom
B0 = MeanY - (B1 * MeanX)
# print coefficients
print(B1, B0)
# plotting values and the regression line
MaxX = np.max(X) + 100
MinX = np.min(X) - 100
# calculating line values X and Y
x = np.linspace(MinX, MaxX, 1000)
y = B0 + B1 * x
# plotting line
plt.plot(x, y, color='#58b970', label="Regression")
# plotting scatter points
plt.scatter(X, Y, color="#ef5423", label="Scatter Plot")
plt.xlabel("Head Size in cm^3")
plt.ylabel("Brain Weight in grams")
plt.legend()
plt.show()
ss_t = 0
ss_r = 0
for i in range(n):
y_pred = B0 +B1 * X[i]
ss_t += (Y[i] - MeanY) **2
ss_r += (Y[i] - y_pred) **2
r2 = 1- (ss_r/ss_t)
print(r2)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# scikit-learn expects a 2D feature matrix, not a rank-1 array
X = X.reshape((n, 1))
#creating Model
reg=LinearRegression()
#fitting training data
reg = reg.fit(X,Y)
#Y prediction
y_pred = reg.predict(X)
#calculating R2 Score
R2_score=reg.score(X,Y)
print(R2_score)
| 19.986667
| 54
| 0.687125
|
c2972cbee4ec49496a22c9f97ac8883d9d7c9989
| 439
|
py
|
Python
|
enaml/enaml/web/item.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 11
|
2015-03-14T14:30:51.000Z
|
2022-03-15T13:01:44.000Z
|
enaml/enaml/web/item.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 3
|
2015-01-31T11:12:56.000Z
|
2022-03-14T00:53:25.000Z
|
enaml/enaml/web/item.py
|
ContinuumIO/ashiba
|
a93e7785d1fcf397baeb8a0b687a162a2b2aef3d
|
[
"BSD-3-Clause"
] | 4
|
2015-01-27T01:56:14.000Z
|
2021-02-23T07:21:20.000Z
|
from atom.api import Unicode
from enaml.core.declarative import d_
from html_object import HTMLObject
from lxml.html import builder as E
class Item(HTMLObject):
tag = E.LI
text = d_(Unicode())
def initialize(self):
super(Item, self).initialize()
def buildHTML(self, *args):
self.addTags()
self.addText(self.text)
self.addAttributes()
return super(Item, self).buildHTML(*args)
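# Illustrative .enaml usage (hypothetical; Item renders an <li> with its text):
#
#     Item:
#         text = 'First entry'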
| 19.954545
| 49
| 0.665148
|
caf2f1d459fef77b39d8cdd82823304674a777cb
| 6,001
|
py
|
Python
|
p4a-cloud/master/web/table.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | null | null | null |
p4a-cloud/master/web/table.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | null | null | null |
p4a-cloud/master/web/table.py
|
WeilerWebServices/Kivy
|
54e3438156eb0c853790fd3cecc593f09123f892
|
[
"MIT"
] | null | null | null |
'''
DataTable: a flask/sqlalchemy module to generate HTML with server-side data.
The project uses DataTables from http://www.datatables.net/
'''
__all__ = ('Table', 'Column')
import simplejson
from flask import url_for, request
from sqlalchemy import asc, desc
class Column(object):
def __init__(self, name, field, display=None, formatter=None, width=None):
super(Column, self).__init__()
self.name = name
self.field = field
self.display = display
self.formatter = formatter
self.width = width
def __html__(self):
return '<th>%s</th>' % self.name
def __js_def__(self, index, out):
if self.width:
out.append({'sWidth': self.width, 'aTargets': [index]})
def get_field(self, entry):
if self.display:
value = self.display(entry)
else:
value = getattr(entry, self.field)
if self.formatter:
return self.formatter(value)
return value
class Table(object):
__uniqid = 0
db_table = None
db_session = None
display_length = 20
activate_sort = True
activate_info = True
activate_paginate = True
activate_scroll_infinite = False
activate_filter = True
activate_length_change = True
activate_scroll_collapse = True
pagination_type = 'full_numbers'
scroll_x = ''
scroll_y = ''
href_link = None
def __init__(self, **kwargs):
super(Table, self).__init__()
self.html_id = kwargs.get('html_id', None)
if self.html_id is None:
Table.__uniqid += 1
self.html_id = 'datatable%d' % Table.__uniqid
def query(self):
return self.db_session.query(self.db_table)
def ajax(self):
q = self.query()
# total number of entries
count = q.count()
# search
if 'sSearch' in request.args:
search = None
for col in self.columns:
field = getattr(self.db_table, col.field)
field = field.like('%%%s%%' % request.args['sSearch'])
if search is None:
search = field
else:
search = search | field
q = q.filter(search)
# sorting
if 'iSortingCols' in request.args:
field = self.columns[int(request.args['iSortCol_0'])].field
db_field = getattr(self.db_table, field)
if request.args['sSortDir_0'] == 'asc':
db_field = asc(db_field)
else:
db_field = desc(db_field)
q = q.order_by(db_field)
# get the number after filter
count_filtered = q.count()
# pagination
        if self.activate_scroll_infinite:
            limit = self.display_length
        else:
            # request args arrive as strings; cast before passing to SQLAlchemy
            limit = int(request.args['iDisplayLength'])
        offset = int(request.args['iDisplayStart'])
        entries = q.offset(offset).limit(limit)
# construct the output
data = []
columns = self.columns
for entry in entries:
data.append([col.get_field(entry) for col in columns])
return simplejson.dumps({
'sEcho': request.args['sEcho'],
'iTotalRecords': count,
'iTotalDisplayRecords': count_filtered,
'aaData': data
})
def __json_columns_defs__(self):
out = []
for index, col in enumerate(self.columns):
col.__js_def__(index, out)
return simplejson.dumps(out)
def __js_rowclick__(self):
return ''
def __html_columns__(self):
out = ['<tr>']
for col in self.columns:
out.append(col.__html__())
out.append('</tr>')
return ''.join(out)
def __html__(self):
data = {
'html_id': self.html_id,
'columns': self.__html_columns__(),
'click_callback': self.__js_rowclick__(),
# datatable
'iDisplayLength': str(int(self.display_length)),
'bSort': str(bool(self.activate_sort)).lower(),
'bInfo': str(bool(self.activate_info)).lower(),
'bPaginate': str(bool(self.activate_paginate)).lower(),
'bScrollInfinite': str(bool(self.activate_scroll_infinite)).lower(),
'bScrollCollapse': str(bool(self.activate_scroll_collapse)).lower(),
'bFilter': str(bool(self.activate_filter)).lower(),
'bLengthChange': str(bool(self.activate_length_change)).lower(),
'sScrollX': str(self.scroll_x),
'sScrollY': str(self.scroll_y),
'sPaginationType': str(self.pagination_type),
'sAjaxSource': url_for(self.source),
'aoColumnDefs': self.__json_columns_defs__()
}
html = '''
<script type="text/javascript">
$(document).ready(function() {
$("#%(html_id)s").dataTable({
'bJQueryUI': true,
'bProcessing': true,
'bServerSide': true,
'bScrollInfinite': %(bScrollInfinite)s,
'bScrollCollapse': %(bScrollCollapse)s,
'bSort': %(bSort)s,
'bInfo': %(bInfo)s,
'bFilter': %(bFilter)s,
'bLengthChange': %(bLengthChange)s,
'bPaginate': %(bPaginate)s,
'iDisplayLength': %(iDisplayLength)s,
'sAjaxSource': '%(sAjaxSource)s',
'sPaginationType': '%(sPaginationType)s',
'sScrollY': '%(sScrollY)s',
'sScrollX': '%(sScrollX)s',
'aoColumnDefs': %(aoColumnDefs)s
});
});
$("#%(html_id)s tbody tr").live('click', function() {
%(click_callback)s
});
</script>
<table id="%(html_id)s">
<thead>
%(columns)s
</thead>
<tbody>
</tbody>
</table>
''' % data
return html
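# Minimal usage sketch (illustrative; `User`, `session` and the 'user_ajax'
# endpoint name are assumptions -- subclasses are expected to provide
# `columns` and `source` even though the base class never declares them):
#
#     class UserTable(Table):
#         db_table = User
#         db_session = session
#         source = 'user_ajax'      # endpoint name passed to url_for()
#         columns = [Column('Name', 'name'), Column('Email', 'email')]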
| 31.751323
| 80
| 0.543576
|
a63498c987c88ec707f507487aded691c63828d2
| 9,533
|
py
|
Python
|
CIM14/IEC61968/PaymentMetering/AuxiliaryAgreement.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58
|
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM14/IEC61968/PaymentMetering/AuxiliaryAgreement.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12
|
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM14/IEC61968/PaymentMetering/AuxiliaryAgreement.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35
|
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61968.Common.Agreement import Agreement
class AuxiliaryAgreement(Agreement):
"""An ad-hoc auxiliary account agreement associated with a customer agreement, not part of the customer's account, but typically subject to formal agreement between customer and supplier (utility). Typically this is used to collect revenue owing by the customer for other services or arrears accrued with the utility for other services. It is typically linked to a prepaid token purchase transaction, thus forcing the customer to make a payment towards settlement of the auxiliary account balance whenever he needs to purchase a prepaid token for electricity. The present status of AuxiliaryAgreement can be defined in the context of the utility's business rules, for example: enabled, disabled, pending, over recovered, under recovered, written off, etc.
"""
def __init__(self, arrearsInterest=0.0, vendPortion=0.0, minAmount=0.0, auxPriorityCode='', subCategory='', auxRef='', auxCycle='', vendPortionArrear=0.0, payCycle='', fixedAmount=0.0, AuxiliaryAccounts=None, CustomerAgreement=None, *args, **kw_args):
"""Initialises a new 'AuxiliaryAgreement' instance.
@param arrearsInterest: The interest per annum to be charged prorata on AuxiliaryAccount.dueArrears at the end of each payCycle.
@param vendPortion: The percentage of the transaction amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement when payments are not in arrears. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
        @param minAmount: The minimum amount that must be paid at any transaction towards settling this AuxiliaryAgreement or reducing the balance.
@param auxPriorityCode: The coded priority indicating the priority that this AuxiliaryAgreement has above other AuxiliaryAgreements (associated with the same customer agreement) when it comes to competing for settlement from a payment transaction or token purchase.
@param subCategory: Sub-category of this AuxiliaryAgreement as sub-classification of the inherited 'category'.
@param auxRef: A local reference to this AuxiliaryAgreement defined in the context of the implementation and not related to IdentifiedObject.mRID.
@param auxCycle: The frequency for automatically recurring auxiliary charges, where AuxiliaryAccount.initialCharge is recursively added to AuxiliaryAccount.dueCurrent at the start of each auxCycle. For example: on a specified date and time; hourly; daily; weekly; monthly; 3-monthly; 6-monthly; 12-monthly; etc.
@param vendPortionArrear: The percentage of the transaction amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement when payments are in arrears. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
@param payCycle: The contractually expected payment frequency (by the customer). Examples are: ad-hoc; on specified date; hourly, daily, weekly, monthly. etc.
@param fixedAmount: The fixed amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
@param AuxiliaryAccounts: All auxiliary accounts regulated by this agreement.
@param CustomerAgreement: Customer agreement this (non-service related) auxiliary agreement refers to.
"""
#: The interest per annum to be charged prorata on AuxiliaryAccount.dueArrears at the end of each payCycle.
self.arrearsInterest = arrearsInterest
#: The percentage of the transaction amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement when payments are not in arrears. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
self.vendPortion = vendPortion
        #: The minimum amount that must be paid at any transaction towards settling this AuxiliaryAgreement or reducing the balance.
self.minAmount = minAmount
#: The coded priority indicating the priority that this AuxiliaryAgreement has above other AuxiliaryAgreements (associated with the same customer agreement) when it comes to competing for settlement from a payment transaction or token purchase.
self.auxPriorityCode = auxPriorityCode
#: Sub-category of this AuxiliaryAgreement as sub-classification of the inherited 'category'.
self.subCategory = subCategory
#: A local reference to this AuxiliaryAgreement defined in the context of the implementation and not related to IdentifiedObject.mRID.
self.auxRef = auxRef
#: The frequency for automatically recurring auxiliary charges, where AuxiliaryAccount.initialCharge is recursively added to AuxiliaryAccount.dueCurrent at the start of each auxCycle. For example: on a specified date and time; hourly; daily; weekly; monthly; 3-monthly; 6-monthly; 12-monthly; etc.
self.auxCycle = auxCycle
#: The percentage of the transaction amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement when payments are in arrears. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
self.vendPortionArrear = vendPortionArrear
        #: The contractually expected payment frequency (by the customer). Examples are: ad-hoc; on specified date; hourly, daily, weekly, monthly, etc.
self.payCycle = payCycle
#: The fixed amount that must be collected from each vending transaction towards settlement of this AuxiliaryAgreement. Note that there may be multiple tokens vended per vending transaction, but this is not relevant.
self.fixedAmount = fixedAmount
self._AuxiliaryAccounts = []
self.AuxiliaryAccounts = [] if AuxiliaryAccounts is None else AuxiliaryAccounts
self._CustomerAgreement = None
self.CustomerAgreement = CustomerAgreement
super(AuxiliaryAgreement, self).__init__(*args, **kw_args)
_attrs = ["arrearsInterest", "vendPortion", "minAmount", "auxPriorityCode", "subCategory", "auxRef", "auxCycle", "vendPortionArrear", "payCycle", "fixedAmount"]
_attr_types = {"arrearsInterest": float, "vendPortion": float, "minAmount": float, "auxPriorityCode": str, "subCategory": str, "auxRef": str, "auxCycle": str, "vendPortionArrear": float, "payCycle": str, "fixedAmount": float}
_defaults = {"arrearsInterest": 0.0, "vendPortion": 0.0, "minAmount": 0.0, "auxPriorityCode": '', "subCategory": '', "auxRef": '', "auxCycle": '', "vendPortionArrear": 0.0, "payCycle": '', "fixedAmount": 0.0}
_enums = {}
_refs = ["AuxiliaryAccounts", "CustomerAgreement"]
_many_refs = ["AuxiliaryAccounts"]
def getAuxiliaryAccounts(self):
"""All auxiliary accounts regulated by this agreement.
"""
return self._AuxiliaryAccounts
def setAuxiliaryAccounts(self, value):
for x in self._AuxiliaryAccounts:
x.AuxiliaryAgreement = None
for y in value:
y._AuxiliaryAgreement = self
self._AuxiliaryAccounts = value
AuxiliaryAccounts = property(getAuxiliaryAccounts, setAuxiliaryAccounts)
def addAuxiliaryAccounts(self, *AuxiliaryAccounts):
for obj in AuxiliaryAccounts:
obj.AuxiliaryAgreement = self
def removeAuxiliaryAccounts(self, *AuxiliaryAccounts):
for obj in AuxiliaryAccounts:
obj.AuxiliaryAgreement = None
def getCustomerAgreement(self):
"""Customer agreement this (non-service related) auxiliary agreement refers to.
"""
return self._CustomerAgreement
def setCustomerAgreement(self, value):
if self._CustomerAgreement is not None:
filtered = [x for x in self.CustomerAgreement.AuxiliaryAgreements if x != self]
self._CustomerAgreement._AuxiliaryAgreements = filtered
self._CustomerAgreement = value
if self._CustomerAgreement is not None:
if self not in self._CustomerAgreement._AuxiliaryAgreements:
self._CustomerAgreement._AuxiliaryAgreements.append(self)
CustomerAgreement = property(getCustomerAgreement, setCustomerAgreement)
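# Illustrative sketch (not part of the original module): wiring the
# bidirectional associations defined above; the 'account' and
# 'customer_agreement' instances are hypothetical.
#
#     agreement = AuxiliaryAgreement(auxRef='AUX-001', payCycle='monthly',
#                                    vendPortion=0.1, vendPortionArrear=0.2)
#     agreement.addAuxiliaryAccounts(account)           # sets account.AuxiliaryAgreement
#     agreement.CustomerAgreement = customer_agreement  # registers self on the agreement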
| 75.062992
| 759
| 0.755271
|
49a8563ba7202a9bce2aae775f7187474ee07241
| 4,557
|
py
|
Python
|
Tools/N++replaytolibTAS.py
|
EuniverseCat/NPlusPlusTAS
|
cf1289449a46754f4984acd307745689d5df1044
|
[
"MIT"
] | 1
|
2020-01-01T01:24:32.000Z
|
2020-01-01T01:24:32.000Z
|
Tools/N++replaytolibTAS.py
|
EuniverseCat/NPlusPlusTAS
|
cf1289449a46754f4984acd307745689d5df1044
|
[
"MIT"
] | null | null | null |
Tools/N++replaytolibTAS.py
|
EuniverseCat/NPlusPlusTAS
|
cf1289449a46754f4984acd307745689d5df1044
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from urllib.error import HTTPError
import json
import time
import zlib
IGNORED_PLAYERS = [
"Kronogenics",
"BlueIsTrue",
"fiordhraoi",
"cheeseburgur101",
"Jey",
"jungletek",
"Hedgy",
"ᕈᘎᑕᒎᗩn ᙡiᗴᒪḰi",
"Venom",
"EpicGamer10075",
"Altii",
"Puςe",
"Floof The Goof",
]
tabOffsets = {'SI':0, 'S':600, 'SU':2400, 'SL':1200, 'Q':1800 , '!':3000}
rowSizes = {'SI':5, 'S':20, 'SU':20, 'SL':20, 'Q':4, '!':4}
rowOffsets = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'X':5}
def scoresUri(level):
uri = "https://dojo.nplusplus.ninja/prod/steam/get_scores?steam_id={0}&steam_auth=&level_id={1}"
uri = uri.format(steamID, level)
return uri
def replayUri(replay):
uri = "https://dojo.nplusplus.ninja/prod/steam/get_replay?steam_id={0}&steam_auth=&replay_id={1}"
uri = uri.format(steamID, replay)
return uri
def parseLevelID(ID):
try:
sections = ID.upper().split("-")
tab = sections[0]
row = sections[1]
if tab == "Q" or tab == "!":
episode = 0
level = int(sections[2])
if level > 23:
return None
            # each secret-tab level spans 5 consecutive IDs, one per row A-E
            levelID = tabOffsets[tab] + level * 5 + rowOffsets[row]
return levelID
else:
episode = int(sections[2])
level = int(sections[3])
if (row == 'X' and tab == 'SI') or episode >= rowSizes[tab] or level > 4:
return None
if row == 'X':
levelID = tabOffsets[tab] + rowSizes[tab] * 25 + episode * 5 + level
else:
                # each episode spans a 25-ID block: 5 rows x 5 levels
                levelID = tabOffsets[tab] + episode * 25 + rowOffsets[row] * 5 + level
return levelID
except:
return None
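# Worked examples for the encoding above (illustrative, not in the original
# script): regular tabs pack each episode into a 25-ID block (5 rows x 5
# levels), while the secret tabs index levels directly:
#   "S-B-07-3" -> 600 + 7*25 + 1*5 + 3 = 783
#   "Q-C-12"   -> 1800 + 12*5 + 2     = 1862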
def NametoID(name):
pass
def GetScores(nameorID, rank, toRank = None):
if not toRank: toRank = rank + 1
else: toRank += 1
try:
ID = int(nameorID)
except ValueError:
ID = parseLevelID(nameorID)
    if ID is None:
        ID = NametoID(nameorID)
    if ID is None:
        print("Level not found.")
        return
for i in range(6):
if i == 5:
return None
try:
scores = json.loads(urlopen(scoresUri(ID)).read())
if scores == -1337:
print("Connection expired. Press enter after reconnecting to Metanet servers.")
input()
continue
break
except HTTPError:
print("HTTP Error. Press enter to try again.")
input()
scores = scores['scores']
ignored = 0
i = 0
entries = []
while i - ignored < toRank and i < 20:
entry = scores[i]
if entry['user_name'] in IGNORED_PLAYERS:
ignored += 1
elif i - ignored >= rank:
entries.append((entry['score'], entry['replay_id'], entry['user_name']))
i += 1
return entries
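# Illustrative result shape (not in the original script): GetScores returns a
# list of (score, replay_id, user_name) tuples covering ranks rank..toRank
# inclusive, with IGNORED_PLAYERS filtered out, e.g.
#   GetScores("S-B-07-3", 0) -> [(2145, 1234567, "somePlayer")]  # hypothetical values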
def SaveReplay(nameorID, rank, toRank = None):
scores = GetScores(nameorID, rank, toRank)
if scores == None:
return
if scores == []:
print("Replay not found.")
return
for score, replayID, name in scores:
for i in range(5):
try:
replay = urlopen(replayUri(replayID)).read()
                if replay == b'-1337':  # urlopen().read() returns bytes
print("Connection expired. Press enter after reconnecting to Metanet servers.")
input()
continue
break
except HTTPError:
print("HTTP Error. Press enter to try again.")
replay = zlib.decompress(replay[16:])[30:]
filename = f'{nameorID} - {score} - {name}.ltm'
output = open(filename, 'w')
for frame in replay:
inputs = ''
            if frame | 1 == frame: inputs += '007a:'  # bit 0: jump ('z', keysym 0x007a)
            if frame | 2 == frame: inputs += 'ff53:'  # bit 1: right (XK_Right, 0xff53)
            if frame | 4 == frame: inputs += 'ff51:'  # bit 2: left (XK_Left, 0xff51)
inputs = '|' + inputs[:-1] + '|\n'
output.write(inputs)
output.close()
print(f'Saved {filename}')
steamID = input("Enter Steam ID #: ")
while True:
level = input("Enter in-game or Metanet level ID. (q to quit)\n")
if level == "":
continue
if level.lower() == 'q':
break
if level[0] == '?':
level = 'Q' + level[1:]
try:
rank = int(input("Enter rank of replay to convert.\n"))
except ValueError:
print("Invalid rank.")
continue
SaveReplay(level, rank)
| 30.38
| 101
| 0.519421
|
b89c43c75e3d0614d010d597350956ef7aa2a956
| 410
|
py
|
Python
|
codeforces/div3/1203/B/B.py
|
mathemage/CompetitiveProgramming
|
fe39017e3b017f9259f9c1e6385549270940be64
|
[
"MIT"
] | 2
|
2015-08-18T09:51:19.000Z
|
2019-01-29T03:18:10.000Z
|
codeforces/div3/1203/B/B.py
|
mathemage/CompetitiveProgramming
|
fe39017e3b017f9259f9c1e6385549270940be64
|
[
"MIT"
] | null | null | null |
codeforces/div3/1203/B/B.py
|
mathemage/CompetitiveProgramming
|
fe39017e3b017f9259f9c1e6385549270940be64
|
[
"MIT"
] | null | null | null |
q = int(input())
for _ in range(q):
n = int(input())
a = list(map(int, input().split()))
a.sort()
a2 = [a[i] for i in range(0, len(a), 2)]
a3 = [a[i+1] for i in range(0, len(a), 2)]
if a2 != a3:
print("NO")
continue
a3.reverse()
areas = [ai * aj for ai, aj in zip(a2, a3)]
if areas == [areas[0]] * 2 * n:
print("YES")
else:
print("NO")
| 21.578947
| 47
| 0.458537
|
267b435d6c0ca6ba932e44bde6d895441bc21867
| 3,139
|
py
|
Python
|
raven/processors.py
|
alex/raven
|
28227282891d7eb7e3600a458d0d8c7164f63dcb
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T12:45:40.000Z
|
2015-11-08T12:45:40.000Z
|
raven/processors.py
|
openlabs/raven
|
c065a96456c4e99a3726a599630dfe7fee9d29e6
|
[
"BSD-3-Clause"
] | null | null | null |
raven/processors.py
|
openlabs/raven
|
c065a96456c4e99a3726a599630dfe7fee9d29e6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
raven.core.processors
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from raven.utils import varmap
class Processor(object):
def __init__(self, client):
self.client = client
def get_data(self, data, **kwargs):
return
def process(self, data, **kwargs):
resp = self.get_data(data, **kwargs)
if resp:
data = resp
return data
class RemovePostDataProcessor(Processor):
"""
Removes HTTP post data.
"""
def process(self, data, **kwargs):
if 'sentry.interfaces.Http' in data:
data['sentry.interfaces.Http'].pop('data', None)
return data
class RemoveStackLocalsProcessor(Processor):
"""
Removes local context variables from stacktraces.
"""
def process(self, data, **kwargs):
if 'sentry.interfaces.Stacktrace' in data:
for frame in data['sentry.interfaces.Stacktrace'].get('frames', []):
frame.pop('vars', None)
return data
class SanitizePasswordsProcessor(Processor):
"""
Asterisk out passwords from password fields in frames, http,
and basic extra data.
"""
MASK = '*' * 8
FIELDS = frozenset(['password', 'secret', 'passwd'])
VALUES_RE = re.compile(r'^\d{16}$')
def sanitize(self, key, value):
if value is None:
return
if isinstance(value, basestring) and self.VALUES_RE.match(value):
return self.MASK
if not key: # key can be a NoneType
return value
key = key.lower()
for field in self.FIELDS:
if field in key:
# store mask as a fixed length for security
return self.MASK
return value
def filter_stacktrace(self, data):
if 'frames' not in data:
return
for frame in data['frames']:
if 'vars' not in frame:
continue
frame['vars'] = varmap(self.sanitize, frame['vars'])
def filter_http(self, data):
for n in ('data', 'cookies', 'headers', 'env', 'query_string'):
if n not in data:
continue
if isinstance(data[n], basestring) and '=' in data[n]:
# at this point we've assumed it's a standard HTTP query
querybits = []
for bit in data[n].split('&'):
chunk = bit.split('=')
if len(chunk) == 2:
querybits.append((chunk[0], self.sanitize(*chunk)))
else:
querybits.append(chunk)
data[n] = '&'.join('='.join(k) for k in querybits)
else:
data[n] = varmap(self.sanitize, data[n])
def process(self, data, **kwargs):
if 'sentry.interfaces.Stacktrace' in data:
self.filter_stacktrace(data['sentry.interfaces.Stacktrace'])
if 'sentry.interfaces.Http' in data:
self.filter_http(data['sentry.interfaces.Http'])
return data
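# Illustrative behaviour (not part of the original module):
#   p = SanitizePasswordsProcessor(client=None)
#   p.sanitize('password', 'hunter2')       # -> '********' (key matches FIELDS)
#   p.sanitize('card', '4111111111111111')  # -> '********' (16 digits match VALUES_RE)
#   p.sanitize('username', 'alice')         # -> 'alice' (left untouched)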
| 28.026786
| 80
| 0.553998
|
d012f82a21e40f36678f5d3ac1df9f2d87cfe167
| 2,048
|
py
|
Python
|
d2ff.py
|
LeoZ100/D2FFConverter
|
f36f2c64f5f5981ae0d5edba0ded9ce19878bb0b
|
[
"MIT"
] | 1
|
2020-07-13T13:33:58.000Z
|
2020-07-13T13:33:58.000Z
|
d2ff.py
|
LeoZ100/d2ff
|
f36f2c64f5f5981ae0d5edba0ded9ce19878bb0b
|
[
"MIT"
] | null | null | null |
d2ff.py
|
LeoZ100/d2ff
|
f36f2c64f5f5981ae0d5edba0ded9ce19878bb0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import csv
import json
def transform_to_importable_firefox_file(input_file, output_file):
with open(input_file) as dashlane_file, open(output_file, 'w+') as firefox_file:
password_section = get_password_section(dashlane_file)
firefox_csv = initialize_firefox_csv(firefox_file)
for password_entry in password_section:
            if password_entry['password'] != '':
url = f"https://www.{password_entry['domain']}"
                login = password_entry['login'] if password_entry['login'] != '' else password_entry['email']
password = password_entry['password']
firefox_csv.writerow([url, login, password])
def initialize_firefox_csv(firefox_file):
    firefox_csv = csv.writer(firefox_file, quoting=csv.QUOTE_MINIMAL)  # default '"' quotechar keeps the CSV well-formed
firefox_csv.writerow(['url', 'username', 'password'])
return firefox_csv
def get_password_section(dashlane_file):
try:
return json.load(dashlane_file)['AUTHENTIFIANT']
    except (ValueError, KeyError):  # invalid JSON or missing 'AUTHENTIFIANT' section
print(f'ERROR: {dashlane_file.name} is not a valid Dashlane JSON Export file...')
exit(1)
def get_io_files():
parser = get_parser()
args = parser.parse_args()
return args.input_file, args.output_file if args.output_file is not None else 'FirefoxImport.csv'
def get_parser():
parser = argparse.ArgumentParser(description='Converts a Dashlane Password Manager JSON Export '
'into a compatible format for importing passwords into Firefox.')
parser.add_argument('input_file',
help='The input "Dashlane JSON export" file to be processed')
parser.add_argument('-o',
'--output-file',
help='The output Firefox CSV import file to be produced')
return parser
def main():
input_file, output_file = get_io_files()
transform_to_importable_firefox_file(input_file, output_file)
if __name__ == '__main__':
main()
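# Example invocation (illustrative, filenames hypothetical):
#   python3 d2ff.py DashlaneExport.json -o FirefoxImport.csv
# The resulting CSV starts with the url/username/password header row written by
# initialize_firefox_csv above.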
| 34.711864
| 114
| 0.666992
|
4f70d6e47e106da0fa234fc4d3a99e207fb714e7
| 7,939
|
py
|
Python
|
xdagtool/xdu.py
|
rulerson/xdagtool
|
1eb7fb352dd7cadcd6611279720210e04a5ec3f6
|
[
"MIT"
] | null | null | null |
xdagtool/xdu.py
|
rulerson/xdagtool
|
1eb7fb352dd7cadcd6611279720210e04a5ec3f6
|
[
"MIT"
] | null | null | null |
xdagtool/xdu.py
|
rulerson/xdagtool
|
1eb7fb352dd7cadcd6611279720210e04a5ec3f6
|
[
"MIT"
] | null | null | null |
import click
from dateutil import parser
from pathlib import Path
import shutil
g_verbose: bool = False
def time_slice(time_start, time_end):
time_start_ts = int(time_start.timestamp())
time_end_ts = int(time_end.timestamp())
time_start_slice = time_start_ts >> 6
time_end_slice = time_end_ts >> 6
if g_verbose:
print('time_start_ts={0}, time_start_hex={0:X}, time_start_slice={1:X}, time_end_ts={2}, '
'time_end_hex={2:X}, time_end_slice={3:X}'.format(
time_start_ts, time_start_slice, time_end_ts, time_end_slice))
start_level_4 = time_start_slice & 0xFF
start_level_3 = (time_start_slice >> 8) & 0xFF
start_level_2 = (time_start_slice >> 16) & 0xFF
start_level_1 = (time_start_slice >> 24) & 0xFF
end_level_4 = time_end_slice & 0xFF
end_level_3 = (time_end_slice >> 8) & 0xFF
end_level_2 = (time_end_slice >> 16) & 0xFF
end_level_1 = (time_end_slice >> 24) & 0xFF
if g_verbose:
print('start_level={:x}-{:x}-{:x}-{:x}, end_level={:x}-{:x}-{:x}-{:x}'.format(
start_level_1, start_level_2, start_level_3, start_level_4,
end_level_1, end_level_2, end_level_3, end_level_4))
print('start_level={}-{}-{}-{}, end_level={}-{}-{}-{}'.format(
start_level_1, start_level_2, start_level_3, start_level_4,
end_level_1, end_level_2, end_level_3, end_level_4))
return time_start_slice, time_end_slice
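# Worked example (illustrative): a timestamp of 1600000000 s gives
# slice = 1600000000 >> 6 = 25000000 = 0x017D7840, i.e. levels 01/7d/78/40 and
# the storage path "01/7d/78/40.dat"; each slice thus covers a 64-second window,
# and SlicePath below enumerates every such path between two slices.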
class SlicePath:
def __init__(self, time_start_slice, time_end_slice, debug=False):
self.__time_start_slice = time_start_slice
self.__time_end_slice = time_end_slice
self.__debug = debug
self.__paths = []
def paths(self):
self.__iter_level1()
return self.__paths
def __iter_level1(self):
for x in range(0, 0xFF + 1):
xv_min = x << 24
xv_max = xv_min + 0xFFFFFF
level_path = '{}'.format(x) if self.__debug else '{:0>2x}'.format(x)
if xv_max < self.__time_start_slice or xv_min > self.__time_end_slice:
continue
if xv_min > self.__time_start_slice and xv_max < self.__time_end_slice:
self.__paths.append('{}/'.format(level_path))
else:
self.__iter_level2(xv_min, level_path)
def __iter_level2(self, level_start, parent_path):
for x in range(0, 0xFF + 1):
xv_min = x << 16
xv_max = xv_min + 0xFFFF
level_path = '{}/{}'.format(parent_path, x) if self.__debug else '{}/{:0>2x}'.format(parent_path, x)
if level_start + xv_max < self.__time_start_slice or level_start + xv_min > self.__time_end_slice:
continue
if level_start + xv_min > self.__time_start_slice and level_start + xv_max < self.__time_end_slice:
self.__paths.append('{}/'.format(level_path))
else:
self.__iter_level3(level_start + xv_min, level_path)
def __iter_level3(self, level_start, parent_path):
for x in range(0, 0xFF + 1):
xv_min = x << 8
xv_max = xv_min + 0xFF
level_path = '{}/{}'.format(parent_path, x) if self.__debug else '{}/{:0>2x}'.format(parent_path, x)
if level_start + xv_max < self.__time_start_slice or level_start + xv_min > self.__time_end_slice:
continue
if level_start + xv_min > self.__time_start_slice and level_start + xv_max < self.__time_end_slice:
self.__paths.append('{}/'.format(level_path))
else:
self.__iter_level4(level_start + xv_min, level_path)
def __iter_level4(self, level_start, parent_path):
for x in range(0, 0xFF + 1):
level_path = '{}/{}.dat'.format(parent_path, x) if self.__debug else '{}/{:0>2x}.dat'.format(parent_path, x)
if level_start + x >= self.__time_start_slice and level_start + x <= self.__time_end_slice:
self.__paths.append('{}'.format(level_path))
def path_copy(src_path: Path, dest_path: Path):
# mkdir -p
dest_path.parent.mkdir(parents=True, exist_ok=True)
# copy go
src_path_name, dest_path_name = str(src_path), str(dest_path)
if src_path.is_dir():
if g_verbose:
print('copy dir from {} to {}'.format(src_path_name, dest_path_name))
shutil.copytree(src_path_name, dest_path_name)
else:
if g_verbose:
print('copy file from {} to {}'.format(src_path_name, dest_path_name))
shutil.copy2(src_path_name, dest_path_name)
def paths_copy(srcdir: str, destdir: str, paths):
srcdir_path = Path(srcdir).resolve()
destdir_path = Path(destdir).resolve() if Path(destdir).is_absolute() else (Path.cwd() / Path(destdir)).resolve()
print('copy go, srcdir_p={}, destdir_p={}'.format(srcdir_path, destdir_path))
for path_v in paths:
file_path = Path(path_v)
src_path = srcdir_path / file_path
dest_path = destdir_path / file_path
# skip no-exist file/dir
if not src_path.exists():
continue
# copy go
path_copy(src_path, dest_path)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-s', '--src-dir', 'srcdir', default='.', show_default=True,
help='source directory copy from, default to current directory.',
type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True), )
@click.option('-d', '--dst-dir', 'dstdir', default=None, show_default=True,
help='dest directory copy to, default to dryrun mode if not supplied.',
type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True))
@click.option('-r', '--dryrun', is_flag=True,
help='dryrun mode, show all files and directories prepare to process, but do not really run.',
default=False, show_default=True)
@click.option('-v', '--verbose', is_flag=True,
help='verbose mode',
default=False, show_default=True)
@click.argument('time-start', type=str)
@click.argument('time-end', type=str, required=False, default=None)
def cmd_go(srcdir, dstdir, dryrun, time_start, time_end, verbose):
"""xdag blocks slicing.
<command> start-time end-time
show all files and directories prepare to process between start-time and end-time.
    start-time, end-time: ISO 8601 format.
<command> time
find target block filename for specific time slice. (start-time=end-time=time.)
    time: ISO 8601 format.
<command> -s srcdir -d dstdir start-time end-time
copy all files and directories between start-time and end-time from srcdir to dstdir.
notes:
xdag main-net era time: 2018-01-05T22:45:00
    sums.dat may be missing for some directories.
iso8601 time format examples (https://dateutil.readthedocs.io/en/stable/parser.html#dateutil.parser.isoparse):
2018-01-05T22:45:00Z, 2018-01-05T22:45:00+08:00, always use utc time or timezone suffix
"""
# command parameters
# print(srcdir, dstdir, dryrun, time_start, time_end)
global g_verbose
g_verbose = verbose
if not dstdir:
dryrun = True
time_start_dt = parser.isoparse(time_start)
time_end_dt = parser.isoparse(time_end) if time_end else time_start_dt
print('time_start={}, time_end={}'.format(time_start_dt, time_end_dt))
# get slices
time_start_slice, time_end_slice = time_slice(time_start_dt, time_end_dt)
# get paths
paths = SlicePath(time_start_slice, time_end_slice, debug=False).paths()
# dryrun or really run
if dryrun: # only print paths list
print('Dryrun mode')
for x in paths:
print(x)
else: # copy
paths_copy(srcdir, dstdir, paths)
return 0
if __name__ == '__main__':
cmd_go()
| 37.098131
| 120
| 0.641517
|
f41f88e5bdc64ed1e1aa716b6b3cf67248dd7d9d
| 1,493
|
py
|
Python
|
Classification Based Machine Learning for Algorithmic Trading/Predict Next Day Return/spyder_QDA.py
|
gopalm-ai/Machine-Learning-For-Finance
|
ae5527bf5b808c4be3c280823387256af88b5b04
|
[
"MIT"
] | 227
|
2017-07-20T11:25:50.000Z
|
2022-03-13T18:05:01.000Z
|
Classification Based Machine Learning for Algorithmic Trading/Predict Next Day Return/spyder_QDA.py
|
xaviergoby/Machine-Learning-For-Finance
|
2a07f9190cc58c1e540899ad97597a5bd9172b1c
|
[
"MIT"
] | null | null | null |
Classification Based Machine Learning for Algorithmic Trading/Predict Next Day Return/spyder_QDA.py
|
xaviergoby/Machine-Learning-For-Finance
|
2a07f9190cc58c1e540899ad97597a5bd9172b1c
|
[
"MIT"
] | 128
|
2017-07-26T05:54:55.000Z
|
2022-03-28T05:35:40.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 16:51:39 2017
@author: AnthonyN
https://www.quantstart.com/articles/Forecasting-Financial-Time-Series-Part-1
Predicting Price Returns
"""
import numpy as np
import pandas as pd
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
lags = 5
start_test = pd.to_datetime('2017-06-18')
ts = pd.read_csv('data/XMA.csv', index_col='Date')  # forward slash keeps the path portable
ts.index = pd.to_datetime(ts.index)
tslag = ts[['XMA']].copy()
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["XMA"].shift(i+1)
tslag["returns"] = tslag["XMA"].pct_change()
# Create the lagged percentage returns columns
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["Lag_" + str(i+1)].pct_change()
tslag.fillna(0, inplace=True)
tslag["Direction"] = np.sign(tslag["returns"])
# Use the prior two days of returns as predictor values, with direction as the response
X = tslag[["Lag_1", "Lag_2"]]
y = tslag["Direction"]
# Create training and test sets
X_train = X[X.index < start_test]
X_test = X[X.index >= start_test]
y_train = y[y.index < start_test]
y_test = y[y.index >= start_test]
# Create prediction DataFrame
pred = pd.DataFrame(index=y_test.index)
qda = QDA()
qda.fit(X_train, y_train)
y_pred = qda.predict(X_test)
# pred = (1.0 + y_pred * y_test)/2.0
pred = (y_pred == y_test).astype(float)  # 1.0 for a correct directional call, 0.0 otherwise
hit_rate = np.mean(pred)
print('QDA {:.4f}'.format(hit_rate))
| 21.955882
| 88
| 0.656397
|
9c35ad480bb7634ffca9fa97a21654d15ae43ca7
| 14,967
|
py
|
Python
|
run.py
|
BoldingBruggeman/nemo-medusa-cssm
|
8e3a3a1bdfcd8c7d2e536a64c653a600b98b865e
|
[
"CC-BY-4.0"
] | null | null | null |
run.py
|
BoldingBruggeman/nemo-medusa-cssm
|
8e3a3a1bdfcd8c7d2e536a64c653a600b98b865e
|
[
"CC-BY-4.0"
] | null | null | null |
run.py
|
BoldingBruggeman/nemo-medusa-cssm
|
8e3a3a1bdfcd8c7d2e536a64c653a600b98b865e
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import annotations
import sys
import os
import glob
import datetime
import argparse
import re
import shutil
from typing import Optional
import yaml
import numpy
from matplotlib import pyplot
from matplotlib.dates import datestr2num, date2num, num2date
import netCDF4
start_time = None #datetime.datetime(2009, 1, 1)
stop_time = None #datetime.datetime(2012, 1, 1)
sys.path.insert(0, './extern/fabm-mizer/python')
import mizer
# Function for converting from Equivalent Spherical Diameter (micrometer) to wet mass in g
def esd2mass(d): # d: equivalent spherical diameter in micrometer
V = 4./3. * numpy.pi * (numpy.array(d) / 2e6)**3 # V: volume in m3
return V * 1e6 # mass in g approximately equals volume in m3 multiplied by 1e6 (assumes density of 1000 kg/m3)
w_esd2, w_esd20, w_esd200 = esd2mass([2., 20., 200.])
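# Quick sanity check (illustrative): a 200 micrometer ESD sphere has radius
# 1e-4 m, volume 4/3*pi*(1e-4)**3 ~= 4.19e-12 m3, hence w_esd200 ~= 4.19e-6 g.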
preylist = []
preylist.append(('diatoms', 'PHD', (w_esd20, w_esd200), 6.625))
preylist.append(('non-diatoms', 'PHN', (w_esd2, w_esd20 ), 6.625))
preylist.append(('microzooplankton', 'ZMI', (w_esd20, w_esd200), 5.625))
preylist.append(('mesozooplankton', 'ZME', (w_esd200, 1e-3 ), 5.625))
temp_name = 'votemper'
time_name = 'time_counter'
# Parameters of the size spectrum model (mizer, http://dx.doi.org/10.1111/2041-210X.12256)
parameters = dict(
# spectrum partition
w_min=1e-3, # minimum size for the predator spectrum (g)
w_inf=1e6, # maximum size for the predator spectrum (g)
nclass=100, # number of size classes for the predator spectrum
# temperature dependence
T_dependence=1, # temperature dependence of rates (0=none, 1=Arrhenius)
T_ref=13., # reference temperature at which all rates must be given (degrees Celsius)
E_a=0.63, # activation energy for Arrhenius relationship (eV)
# predator-prey preference
beta=100, # optimal predator : prey wet mass ratio (-)
sigma=float(numpy.log(10.)), # standard devation of predator-prey preference (ln g)
# clearance, ingestion, growth efficiency
gamma=156, # scale factor for clearance rate (m3/yr/g^q) = actual rate for individuals of 1 g
q=0.82, # allometric exponent for clearance rate
h=1e9, # scale factor for maximum ingestion rate (g/yr/g^n)
#n=2./3., # allometric exponent for maximum ingestion rate
alpha=0.2, # gross growth efficiency or assimilation efficiency (-)
ks=0., # standard metabolism (1/yr/g^p)
# mortality
z0_type=1, # type of intrinsic mortality (0=constant, 1=allometric function)
z0pre=0.1, # scale factor for intrinsic mortality (1/yr/g^z0exp)
z0exp=-0.25, # allometric exponent for intrinsic mortality
z_spre=0.2, # scale factor for senescence mortality (1/yr)
w_s=1000., # reference ("starting") individual wet mass for senescence mortality (g)
z_s=0.3, # allometric exponent for senescence mortality
# recruitment
SRR=0, # stock-recruitment relationship (0=constant recruitment, 1=equal to reproductive output, 2=Beverton-Holt)
recruitment=0., # constant recruitment rate for smallest size class (#/yr)
# fishing
fishing_type=1, # fishing type (0=none, 1: knife-edge, 2: logistic/sigmoid, 3: linearly increasing)
w_minF=1.25, # minimum individual wet mass for fisheries mortality
F=0.4 # maximum fisheries mortality (knife-edge: constant mortality for individuals > w_minF)
)
def add_variable(nc: netCDF4.Dataset, name: str, long_name: str, units: str, data=None, dimensions: Optional[tuple[str, ...]]=None, zlib: bool=False, contiguous: bool=True, dtype=numpy.float32):
if dimensions is None:
dimensions = (time_name,)
chunksizes = [1] * len(dimensions)
if time_name in dimensions:
chunksizes[dimensions.index(time_name)] = len(nc.dimensions[time_name])
ncvar = nc.createVariable(name, dtype, dimensions, zlib=zlib, fill_value=-2e20, contiguous=contiguous, chunksizes=chunksizes)
if data is not None:
ncvar[:] = data
ncvar.long_name = long_name
ncvar.units = units
if 'x' in dimensions and 'y' in dimensions and 'nav_lon' in nc.variables and 'nav_lat' in nc.variables:
ncvar.coordinates = 'nav_lon nav_lat'
return ncvar
def copy_variable(nc: netCDF4.Dataset, ncvar: netCDF4.Variable, **kwargs):
ncvar_out = nc.createVariable(ncvar.name, ncvar.dtype, ncvar.dimensions, fill_value=getattr(ncvar, '_FillValue', None), **kwargs)
for key in ncvar.ncattrs():
if key != '_FillValue':
setattr(ncvar_out, key, getattr(ncvar, key))
if 'x' in ncvar.dimensions and 'y' in ncvar.dimensions and 'nav_lon' in nc.variables and 'nav_lat' in nc.variables:
ncvar_out.coordinates = 'nav_lon nav_lat'
ncvar_out[...] = ncvar[...]
return ncvar_out
def process_location(args: tuple[str, int, int]):
path, i, j = args
print('Processing %s for i=%i, j=%i...' % (path, i, j))
prey = []
for name, ncname, size_range, CN_ratio in preylist:
scale_factor = 10 * 0.001 * 12 * CN_ratio # 10 g wet mass/g carbon * 0.001 g C/mg C * 12 mg C/mmol C * mmol C/mmol N
timeseries = mizer.datasources.TimeSeries(path, ncname, scale_factor=scale_factor, time_name=time_name, x=i, y=j, stop=stop_time)
times = timeseries.times
prey.append(mizer.Prey(name, size_range, timeseries))
prey_collection = mizer.PreyCollection(*prey)
prey_collection = mizer.GriddedPreyCollection(prey_collection)
# environment: temperature and depth of the layer over which fish interact
temp = mizer.datasources.TimeSeries(path, temp_name, time_name=time_name, x=i, y=j, stop=stop_time)
depth = mizer.datasources.TimeSeries(path, 'bm_int**2/bm2_int', time_name=time_name, x=i, y=j, stop=stop_time)
# create mizer model
m = mizer.Mizer(prey=prey_collection, parameters=parameters, temperature=temp, recruitment_from_prey=True, depth=depth)
# Time-integrate
spinup = 50
istart = 0 if start_time is None else times.searchsorted(date2num(start_time))
istop = len(times) if stop_time is None else times.searchsorted(date2num(stop_time))
times = times[istart:istop]
result = m.run(times, spinup=spinup, verbose=True, save_spinup=False, save_loss_rates=True)
if result is None:
return
biomass = result.get_biomass_timeseries()
landings_var, landings = result.get_timeseries('landings')
lfi1 = result.get_lfi_timeseries(1.)
lfi80 = result.get_lfi_timeseries(80.)
lfi250 = result.get_lfi_timeseries(250.)
lfi500 = result.get_lfi_timeseries(500.)
lfi10000 = result.get_lfi_timeseries(10000.)
landings[1:] = landings[1:] - landings[:-1]
landings[0] = 0
return path, i, j, times, biomass, landings, lfi1, lfi80, lfi250, lfi500, lfi10000, m.bin_masses, result.spectrum, result.get_loss_rates()
def parallel_process_location(args, p):
import run
run.parameters = p
return run.process_location(args)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('source_path')
parser.add_argument('output_path')
parser.add_argument('--ncpus', type=int, default=None)
parser.add_argument('--ppservers', default=None)
parser.add_argument('--secret', default=None)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--parameters', default=None)
parser.add_argument('--shm', action='store_true')
parser.add_argument('--ntask', type=int)
args = parser.parse_args()
if args.parameters is not None:
        with open(args.parameters) as f:  # the universal-newlines 'rU' flag is removed in modern Python
args.parameters = yaml.safe_load(f)
parameters = args.parameters
if isinstance(args.ppservers, (str, u''.__class__)):
match = re.match(r'(.*)\[(.*)\](.*)', args.ppservers)
if match is not None:
# Hostnames in PBS/SLURM notation, e.g., node[01-06]
ppservers = []
left, middle, right = match.groups()
for item in middle.split(','):
if '-' in item:
start, stop = item.split('-')
for i in range(int(start), int(stop)+1):
ppservers.append('%s%s%s' % (left, str(i).zfill(len(start)), right))
else:
ppservers.append('%s%s%s' % (left, item, right))
else:
# Comma-separated hostnames
ppservers = args.ppservers.split(',')
ppservers = tuple(ppservers)
else:
assert args.ppservers is None
ppservers = ()
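    # Illustrative expansion (not in the original script):
    #   --ppservers 'node[01-03,05]' -> ('node01', 'node02', 'node03', 'node05')
    # while a plain comma-separated list such as 'hostA,hostB' passes through unchanged.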
if args.ncpus is None:
args.ncpus = 'autodetect'
tasks = []
if not os.path.isdir(args.output_path):
os.mkdir(args.output_path)
for path in glob.glob(args.source_path):
with netCDF4.Dataset(path) as nc:
valid = (nc.variables[temp_name][...] != 0).any(axis=0)
if 'mask' in nc.variables:
valid = numpy.logical_and(valid, nc.variables['mask'][...] > 0)
for i in range(len(nc.dimensions['x'])):
for j in range(len(nc.dimensions['y'])):
if valid[j, i]:
tasks.append((path, i, j))
if args.ntask is not None:
tasks = tasks[:args.ntask]
source2output = {}
def get_output(source: str, times, bin_masses: numpy.ndarray, compress: bool=False, add_biomass_per_bin: bool=False, contiguous: bool=False, save_loss_rates: bool=False):
if source not in source2output:
            with netCDF4.Dataset(source) as nc:
output_path = os.path.join(args.output_path, os.path.basename(source))
print('Creating %s....' % output_path)
ncout = netCDF4.Dataset(output_path, 'w')
nctime_in = nc.variables[time_name]
ncout.createDimension(time_name, len(times))
ncout.createDimension('x', len(nc.dimensions['x']))
ncout.createDimension('y', len(nc.dimensions['y']))
ncout.createDimension('bin', len(bin_masses))
nctime_out = ncout.createVariable(time_name, nctime_in.datatype, nctime_in.dimensions, zlib=compress, contiguous=contiguous)
nctime_out.units = nctime_in.units
dates = [dt.replace(tzinfo=None) for dt in num2date(times)]
nctime_out[...] = netCDF4.date2num(dates, nctime_out.units)
if 'nav_lon' in nc.variables:
copy_variable(ncout, nc.variables['nav_lon'], zlib=compress)
copy_variable(ncout, nc.variables['nav_lat'], zlib=compress)
add_variable(ncout, 'biomass', 'biomass', 'g WM/m2', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
add_variable(ncout, 'landings', 'landings', 'g WM', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
ncwm = add_variable(ncout, 'binmass', 'wet mass per individual', 'g WM', dimensions=('bin',), zlib=compress, contiguous=contiguous)
ncwm[:] = bin_masses
for wm in (1, 80, 250, 500, 10000):
add_variable(ncout, 'lfi%i' % wm, 'fraction of fish > %i g' % wm, '-', dimensions=(time_name, 'y', 'x'), zlib=compress, contiguous=contiguous)
if add_biomass_per_bin:
add_variable(ncout, 'Nw', 'biomass per bin', 'g WM/m2', dimensions=(time_name, 'y', 'x', 'bin'), zlib=compress, contiguous=contiguous)
if save_loss_rates:
add_variable(ncout, 'loss', 'loss rate per bin', 'd-1', dimensions=(time_name, 'y', 'x', 'bin'), zlib=compress, contiguous=contiguous)
source2output[source] = ncout
return source2output[source]
nsaved = 0
def save_result(result, sync: Optional[bool]=None, add_biomass_per_bin: bool=True, save_loss_rates: bool=True):
global nsaved
if result is None:
print('result %i: FAILED!' % nsaved)
return
print('result %i: saving...' % nsaved)
source, i, j, times, biomass, landings, lfi1, lfi80, lfi250, lfi500, lfi10000, bin_masses, spectrum, loss_rates = result
assert spectrum.shape == loss_rates.shape
print('saving results from %s, i=%i, j=%i' % (source, i, j))
ncout = get_output(source, times, bin_masses, add_biomass_per_bin=add_biomass_per_bin, save_loss_rates=save_loss_rates)
ncout.variables['biomass'][:, j, i] = biomass
ncout.variables['landings'][:, j, i] = landings
ncout.variables['lfi1'][:, j, i] = lfi1
ncout.variables['lfi80'][:, j, i] = lfi80
ncout.variables['lfi250'][:, j, i] = lfi250
ncout.variables['lfi500'][:, j, i] = lfi500
ncout.variables['lfi10000'][:, j, i] = lfi10000
if add_biomass_per_bin:
ncout.variables['Nw'][:, j, i, :] = spectrum[:, :]
if save_loss_rates:
ncout.variables['loss'][:, j, i, :] = loss_rates[:, :]
        if (nsaved % 1000 == 0) if sync is None else sync:  # caller may force or suppress the periodic sync
ncout.sync()
nsaved += 1
job_server = None
final_output_path = None
if args.ncpus == 1:
import cProfile
import pstats
def runSerial(n):
for i in range(n):
save_result(parallel_process_location(tasks[i], parameters), add_biomass_per_bin=True, save_loss_rates=True)
cProfile.run('runSerial(%s)' % min(len(tasks), 3), 'mizerprof')
p = pstats.Stats('mizerprof')
p.strip_dirs().sort_stats('cumulative').print_stats()
else:
if args.debug:
import logging
            logging.basicConfig(level=logging.DEBUG)
import pp
if args.shm:
final_output_path = args.output_path
args.output_path = '/dev/shm'
job_server = pp.Server(ncpus=args.ncpus, ppservers=ppservers, restart=True, secret=args.secret)
for task in tasks:
job_server.submit(parallel_process_location, (task, parameters), callback=save_result)
job_server.wait()
job_server.print_stats()
for source, nc in source2output.items():
name = os.path.basename(source)
print('Closing %s...' % os.path.join(args.output_path, name))
nc.close()
if final_output_path is not None:
target = os.path.join(final_output_path, name)
if os.path.isfile(target):
os.remove(target)
shutil.move(os.path.join(args.output_path, name), target)
print('All results saved.')
if job_server is not None:
job_server.destroy()
| 48.911765
| 194
| 0.627714
|
f3397a81a2ff09432fd29180f196d5a4bd12708d
| 3,535
|
py
|
Python
|
tempest/api/compute/v3/admin/test_hypervisor.py
|
vmahuli/tempest
|
f70319f5eda72b8c8a913ae1002ec531324e4116
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/v3/admin/test_hypervisor.py
|
vmahuli/tempest
|
f70319f5eda72b8c8a913ae1002ec531324e4116
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/v3/admin/test_hypervisor.py
|
vmahuli/tempest
|
f70319f5eda72b8c8a913ae1002ec531324e4116
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import test
class HypervisorAdminV3Test(base.BaseV3ComputeAdminTest):
"""
Tests Hypervisors API that require admin privileges
"""
@classmethod
def setUpClass(cls):
super(HypervisorAdminV3Test, cls).setUpClass()
cls.client = cls.hypervisor_admin_client
def _list_hypervisors(self):
# List of hypervisors
resp, hypers = self.client.get_hypervisor_list()
self.assertEqual(200, resp.status)
return hypers
@test.attr(type='gate')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
@test.attr(type='gate')
def test_get_hypervisor_list_details(self):
# Display the details of the all hypervisor
resp, hypers = self.client.get_hypervisor_list_details()
self.assertEqual(200, resp.status)
self.assertTrue(len(hypers) > 0)
@test.attr(type='gate')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
resp, details = (self.client.
get_hypervisor_show_details(hypers[0]['id']))
self.assertEqual(200, resp.status)
self.assertTrue(len(details) > 0)
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
@test.attr(type='gate')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
hypervisor_id = hypers[0]['id']
resp, hypervisors = self.client.get_hypervisor_servers(hypervisor_id)
self.assertEqual(200, resp.status)
self.assertTrue(len(hypervisors) > 0)
@test.attr(type='gate')
def test_get_hypervisor_stats(self):
# Verify the stats of the all hypervisor
resp, stats = self.client.get_hypervisor_stats()
self.assertEqual(200, resp.status)
self.assertTrue(len(stats) > 0)
@test.attr(type='gate')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
resp, uptime = self.client.get_hypervisor_uptime(hypers[0]['id'])
self.assertEqual(200, resp.status)
self.assertTrue(len(uptime) > 0)
@test.attr(type='gate')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
resp, hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])
self.assertEqual(200, resp.status)
self.assertTrue(len(hypers) > 0)
| 36.071429
| 78
| 0.672702
|
b6352c3c05a7c60ba699dbbf803458fed69c80f8
| 1,130
|
py
|
Python
|
user/migrations/0058_auto_20190503_1026.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-05-20T08:42:49.000Z
|
2020-05-20T08:42:49.000Z
|
user/migrations/0058_auto_20190503_1026.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 14
|
2020-03-24T17:31:08.000Z
|
2022-03-11T23:59:30.000Z
|
user/migrations/0058_auto_20190503_1026.py
|
akindele214/181hub_2
|
48b8814b5f66ad87f9a54721506076ddf70fe9bc
|
[
"MIT"
] | 1
|
2020-04-13T12:37:37.000Z
|
2020-04-13T12:37:37.000Z
|
# Generated by Django 2.1.5 on 2019-05-03 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0057_auto_20190503_1015'),
]
operations = [
migrations.AlterField(
model_name='monetization',
name='account_name',
field=models.CharField(max_length=140),
),
migrations.AlterField(
model_name='monetization',
name='account_num',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='monetization',
name='bank',
field=models.CharField(choices=[('first', 'First Bank Plc'), ('zenith', 'Zenith Bank Plc'), ('polaris', 'Polaris Bank Plc'), ('gt', 'GT Bank Plc'), ('access', 'Access Bank Plc')], default='gt', max_length=20),
),
migrations.AlterField(
model_name='monetization',
name='status',
field=models.CharField(choices=[('Cleared', 'Cleared'), ('uncleared', 'Uncleared')], default='uncleared', max_length=10),
),
]
| 33.235294
| 221
| 0.578761
|
0c0623a6f3f63322be25009558b8a60e99d1f038
| 46,876
|
py
|
Python
|
tests/test_iot/test_iot.py
|
moseb/moto
|
d596560971ae289f102b9aecb9939b3c49a56ac5
|
[
"Apache-2.0"
] | 1
|
2020-01-13T21:45:21.000Z
|
2020-01-13T21:45:21.000Z
|
tests/test_iot/test_iot.py
|
moseb/moto
|
d596560971ae289f102b9aecb9939b3c49a56ac5
|
[
"Apache-2.0"
] | 1
|
2016-02-11T14:54:01.000Z
|
2016-02-12T14:11:05.000Z
|
tests/test_iot/test_iot.py
|
moseb/moto
|
d596560971ae289f102b9aecb9939b3c49a56ac5
|
[
"Apache-2.0"
] | 2
|
2017-03-02T05:59:52.000Z
|
2020-09-03T13:25:44.000Z
|
from __future__ import unicode_literals
import json
import sure # noqa
import boto3
from moto import mock_iot
from botocore.exceptions import ClientError
from nose.tools import assert_raises
@mock_iot
def test_things():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-thing"
type_name = "my-type-name"
# thing type
thing_type = client.create_thing_type(thingTypeName=type_name)
thing_type.should.have.key("thingTypeName").which.should.equal(type_name)
thing_type.should.have.key("thingTypeArn")
res = client.list_thing_types()
res.should.have.key("thingTypes").which.should.have.length_of(1)
for thing_type in res["thingTypes"]:
thing_type.should.have.key("thingTypeName").which.should_not.be.none
thing_type = client.describe_thing_type(thingTypeName=type_name)
thing_type.should.have.key("thingTypeName").which.should.equal(type_name)
thing_type.should.have.key("thingTypeProperties")
thing_type.should.have.key("thingTypeMetadata")
# thing
thing = client.create_thing(thingName=name, thingTypeName=type_name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(1)
for thing in res["things"]:
thing.should.have.key("thingName").which.should_not.be.none
thing.should.have.key("thingArn").which.should_not.be.none
thing = client.update_thing(
thingName=name, attributePayload={"attributes": {"k1": "v1"}}
)
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(1)
for thing in res["things"]:
thing.should.have.key("thingName").which.should_not.be.none
thing.should.have.key("thingArn").which.should_not.be.none
res["things"][0]["attributes"].should.have.key("k1").which.should.equal("v1")
thing = client.describe_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("defaultClientId")
thing.should.have.key("thingTypeName")
thing.should.have.key("attributes")
thing.should.have.key("version")
# delete thing
client.delete_thing(thingName=name)
res = client.list_things()
res.should.have.key("things").which.should.have.length_of(0)
# delete thing type
client.delete_thing_type(thingTypeName=type_name)
res = client.list_thing_types()
res.should.have.key("thingTypes").which.should.have.length_of(0)
@mock_iot
def test_list_thing_types():
client = boto3.client("iot", region_name="ap-northeast-1")
for i in range(0, 100):
client.create_thing_type(thingTypeName=str(i + 1))
thing_types = client.list_thing_types()
thing_types.should.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(50)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("1")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("50")
thing_types = client.list_thing_types(nextToken=thing_types["nextToken"])
thing_types.should.have.key("thingTypes").which.should.have.length_of(50)
thing_types.should_not.have.key("nextToken")
thing_types["thingTypes"][0]["thingTypeName"].should.equal("51")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("100")
@mock_iot
def test_list_thing_types_with_typename_filter():
client = boto3.client("iot", region_name="ap-northeast-1")
client.create_thing_type(thingTypeName="thing")
client.create_thing_type(thingTypeName="thingType")
client.create_thing_type(thingTypeName="thingTypeName")
client.create_thing_type(thingTypeName="thingTypeNameGroup")
client.create_thing_type(thingTypeName="shouldNotFind")
client.create_thing_type(thingTypeName="find me it shall not")
thing_types = client.list_thing_types(thingTypeName="thing")
thing_types.should_not.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(4)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("thing")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("thingTypeNameGroup")
thing_types = client.list_thing_types(thingTypeName="thingTypeName")
thing_types.should_not.have.key("nextToken")
thing_types.should.have.key("thingTypes").which.should.have.length_of(2)
thing_types["thingTypes"][0]["thingTypeName"].should.equal("thingTypeName")
thing_types["thingTypes"][-1]["thingTypeName"].should.equal("thingTypeNameGroup")
@mock_iot
def test_list_things_with_next_token():
client = boto3.client("iot", region_name="ap-northeast-1")
for i in range(0, 200):
client.create_thing(thingName=str(i + 1))
things = client.list_things()
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("1")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/1")
things["things"][-1]["thingName"].should.equal("50")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/50"
)
things = client.list_things(nextToken=things["nextToken"])
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("51")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/51"
)
things["things"][-1]["thingName"].should.equal("100")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/100"
)
things = client.list_things(nextToken=things["nextToken"])
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("101")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/101"
)
things["things"][-1]["thingName"].should.equal("150")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/150"
)
things = client.list_things(nextToken=things["nextToken"])
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("151")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/151"
)
things["things"][-1]["thingName"].should.equal("200")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/200"
)
@mock_iot
def test_list_things_with_attribute_and_thing_type_filter_and_next_token():
client = boto3.client("iot", region_name="ap-northeast-1")
client.create_thing_type(thingTypeName="my-thing-type")
for i in range(0, 200):
if not (i + 1) % 3:
attribute_payload = {"attributes": {"foo": "bar"}}
elif not (i + 1) % 5:
attribute_payload = {"attributes": {"bar": "foo"}}
else:
attribute_payload = {}
if not (i + 1) % 2:
thing_type_name = "my-thing-type"
client.create_thing(
thingName=str(i + 1),
thingTypeName=thing_type_name,
attributePayload=attribute_payload,
)
else:
client.create_thing(
thingName=str(i + 1), attributePayload=attribute_payload
)
# Test filter for thingTypeName
things = client.list_things(thingTypeName=thing_type_name)
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("2")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/2")
things["things"][-1]["thingName"].should.equal("100")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/100"
)
all(item["thingTypeName"] == thing_type_name for item in things["things"])
things = client.list_things(
nextToken=things["nextToken"], thingTypeName=thing_type_name
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("102")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/102"
)
things["things"][-1]["thingName"].should.equal("200")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/200"
)
all(item["thingTypeName"] == thing_type_name for item in things["things"])
# Test filter for attributes
things = client.list_things(attributeName="foo", attributeValue="bar")
things.should.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(50)
things["things"][0]["thingName"].should.equal("3")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/3")
things["things"][-1]["thingName"].should.equal("150")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/150"
)
all(item["attributes"] == {"foo": "bar"} for item in things["things"])
things = client.list_things(
nextToken=things["nextToken"], attributeName="foo", attributeValue="bar"
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(16)
things["things"][0]["thingName"].should.equal("153")
things["things"][0]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/153"
)
things["things"][-1]["thingName"].should.equal("198")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/198"
)
all(item["attributes"] == {"foo": "bar"} for item in things["things"])
# Test filter for attributes and thingTypeName
things = client.list_things(
thingTypeName=thing_type_name, attributeName="foo", attributeValue="bar"
)
things.should_not.have.key("nextToken")
things.should.have.key("things").which.should.have.length_of(33)
things["things"][0]["thingName"].should.equal("6")
things["things"][0]["thingArn"].should.equal("arn:aws:iot:ap-northeast-1:1:thing/6")
things["things"][-1]["thingName"].should.equal("198")
things["things"][-1]["thingArn"].should.equal(
"arn:aws:iot:ap-northeast-1:1:thing/198"
)
    assert all(
        item["attributes"] == {"foo": "bar"}
        and item["thingTypeName"] == thing_type_name
        for item in things["things"]
    )
@mock_iot
def test_certs():
client = boto3.client("iot", region_name="us-east-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("certificatePem").which.should_not.be.none
cert.should.have.key("keyPair")
cert["keyPair"].should.have.key("PublicKey").which.should_not.be.none
cert["keyPair"].should.have.key("PrivateKey").which.should_not.be.none
cert_id = cert["certificateId"]
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("certificateArn").which.should_not.be.none
cert_desc.should.have.key("certificateId").which.should_not.be.none
cert_desc.should.have.key("certificatePem").which.should_not.be.none
cert_desc.should.have.key("status").which.should.equal("ACTIVE")
cert_pem = cert_desc["certificatePem"]
res = client.list_certificates()
for cert in res["certificates"]:
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("status").which.should_not.be.none
cert.should.have.key("creationDate").which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
cert = client.describe_certificate(certificateId=cert_id)
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("REVOKED")
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates")
# Test register_certificate flow
cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True)
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("certificateArn").which.should_not.be.none
cert_id = cert["certificateId"]
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
for cert in res["certificates"]:
cert.should.have.key("certificateArn").which.should_not.be.none
cert.should.have.key("certificateId").which.should_not.be.none
cert.should.have.key("status").which.should_not.be.none
cert.should.have.key("creationDate").which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
cert = client.describe_certificate(certificateId=cert_id)
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("REVOKED")
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates")
@mock_iot
def test_delete_policy_validation():
doc = """{
"Version": "2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"iot: *"
],
"Resource":"*"
}
]
}
"""
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
policy_name = "my-policy"
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
with assert_raises(ClientError) as e:
client.delete_policy(policyName=policy_name)
e.exception.response["Error"]["Message"].should.contain(
"The policy cannot be deleted as the policy is attached to one or more principals (name=%s)"
% policy_name
)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(1)
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
client.delete_policy(policyName=policy_name)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(0)
@mock_iot
def test_delete_certificate_validation():
doc = """{
"Version": "2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"iot: *"
],
"Resource":"*"
}
]
}
"""
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=True)
cert_id = cert["certificateId"]
cert_arn = cert["certificateArn"]
policy_name = "my-policy"
thing_name = "thing-1"
client.create_policy(policyName=policy_name, policyDocument=doc)
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
client.create_thing(thingName=thing_name)
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
with assert_raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.exception.response["Error"]["Message"].should.contain(
"Certificate must be deactivated (not ACTIVE) before deletion."
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.update_certificate(certificateId=cert_id, newStatus="REVOKED")
with assert_raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.exception.response["Error"]["Message"].should.contain(
"Things must be detached before deletion (arn: %s)" % cert_arn
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
with assert_raises(ClientError) as e:
client.delete_certificate(certificateId=cert_id)
e.exception.response["Error"]["Message"].should.contain(
"Certificate policies must be detached before deletion (arn: %s)" % cert_arn
)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(1)
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key("certificates").which.should.have.length_of(0)
@mock_iot
def test_certs_create_inactive():
client = boto3.client("iot", region_name="ap-northeast-1")
cert = client.create_keys_and_certificate(setAsActive=False)
cert_id = cert["certificateId"]
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("INACTIVE")
client.update_certificate(certificateId=cert_id, newStatus="ACTIVE")
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key("certificateDescription")
cert_desc = cert["certificateDescription"]
cert_desc.should.have.key("status").which.should.equal("ACTIVE")
@mock_iot
def test_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-policy"
doc = "{}"
policy = client.create_policy(policyName=name, policyDocument=doc)
policy.should.have.key("policyName").which.should.equal(name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(doc)
policy.should.have.key("policyVersionId").which.should.equal("1")
policy = client.get_policy(policyName=name)
policy.should.have.key("policyName").which.should.equal(name)
policy.should.have.key("policyArn").which.should_not.be.none
policy.should.have.key("policyDocument").which.should.equal(doc)
policy.should.have.key("defaultVersionId").which.should.equal("1")
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
client.delete_policy(policyName=name)
res = client.list_policies()
res.should.have.key("policies").which.should.have.length_of(0)
@mock_iot
def test_principal_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
    # attaching again is a no-op if the policy is already attached to the certificate
client.attach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_policy(policyName=policy_name, target=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(0)
with assert_raises(ClientError) as e:
client.detach_policy(policyName=policy_name, target=cert_arn)
e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException")
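# attach_policy/detach_policy (used above) are the current IoT APIs;
# attach_principal_policy/detach_principal_policy, exercised in the next
# test, are their deprecated equivalents.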
@mock_iot
def test_principal_policy_deprecated():
client = boto3.client("iot", region_name="ap-northeast-1")
policy_name = "my-policy"
doc = "{}"
policy = client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(1)
for policy in res["policies"]:
policy.should.have.key("policyName").which.should_not.be.none
policy.should.have.key("policyArn").which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key("policies").which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key("principals").which.should.have.length_of(0)
@mock_iot
def test_principal_thing():
client = boto3.client("iot", region_name="ap-northeast-1")
thing_name = "my-thing"
thing = client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(1)
for thing in res["things"]:
thing.should_not.be.none
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key("principals").which.should.have.length_of(1)
for principal in res["principals"]:
principal.should_not.be.none
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(0)
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key("principals").which.should.have.length_of(0)
@mock_iot
def test_delete_principal_thing():
client = boto3.client("iot", region_name="ap-northeast-1")
thing_name = "my-thing"
thing = client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert["certificateArn"]
cert_id = cert["certificateId"]
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
client.delete_thing(thingName=thing_name)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key("things").which.should.have.length_of(0)
client.update_certificate(certificateId=cert_id, newStatus="INACTIVE")
client.delete_certificate(certificateId=cert_id)
@mock_iot
def test_describe_thing_group_metadata_hierarchy():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name_1a = "my-group-name-1a"
group_name_1b = "my-group-name-1b"
group_name_2a = "my-group-name-2a"
group_name_2b = "my-group-name-2b"
group_name_3a = "my-group-name-3a"
group_name_3b = "my-group-name-3b"
group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d"
# --1a
# |--2a
# | |--3a
# | |--3b
# |
# |--2b
# |--3c
# |--3d
# --1b
# create thing groups tree
# 1
thing_group1a = client.create_thing_group(thingGroupName=group_name_1a)
thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a)
thing_group1a.should.have.key("thingGroupArn")
thing_group1b = client.create_thing_group(thingGroupName=group_name_1b)
thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b)
thing_group1b.should.have.key("thingGroupArn")
# 2
thing_group2a = client.create_thing_group(
thingGroupName=group_name_2a, parentGroupName=group_name_1a
)
thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a)
thing_group2a.should.have.key("thingGroupArn")
thing_group2b = client.create_thing_group(
thingGroupName=group_name_2b, parentGroupName=group_name_1a
)
thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b)
thing_group2b.should.have.key("thingGroupArn")
# 3
thing_group3a = client.create_thing_group(
thingGroupName=group_name_3a, parentGroupName=group_name_2a
)
thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a)
thing_group3a.should.have.key("thingGroupArn")
thing_group3b = client.create_thing_group(
thingGroupName=group_name_3b, parentGroupName=group_name_2a
)
thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b)
thing_group3b.should.have.key("thingGroupArn")
thing_group3c = client.create_thing_group(
thingGroupName=group_name_3c, parentGroupName=group_name_2b
)
thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c)
thing_group3c.should.have.key("thingGroupArn")
thing_group3d = client.create_thing_group(
thingGroupName=group_name_3d, parentGroupName=group_name_2b
)
thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d)
thing_group3d.should.have.key("thingGroupArn")
# describe groups
# groups level 1
# 1a
thing_group_description1a = client.describe_thing_group(
thingGroupName=group_name_1a
)
thing_group_description1a.should.have.key("thingGroupName").which.should.equal(
group_name_1a
)
thing_group_description1a.should.have.key("thingGroupProperties")
thing_group_description1a.should.have.key("thingGroupMetadata")
thing_group_description1a["thingGroupMetadata"].should.have.key("creationDate")
thing_group_description1a.should.have.key("version")
# 1b
thing_group_description1b = client.describe_thing_group(
thingGroupName=group_name_1b
)
thing_group_description1b.should.have.key("thingGroupName").which.should.equal(
group_name_1b
)
thing_group_description1b.should.have.key("thingGroupProperties")
thing_group_description1b.should.have.key("thingGroupMetadata")
thing_group_description1b["thingGroupMetadata"].should.have.length_of(1)
thing_group_description1b["thingGroupMetadata"].should.have.key("creationDate")
thing_group_description1b.should.have.key("version")
# groups level 2
# 2a
thing_group_description2a = client.describe_thing_group(
thingGroupName=group_name_2a
)
thing_group_description2a.should.have.key("thingGroupName").which.should.equal(
group_name_2a
)
thing_group_description2a.should.have.key("thingGroupProperties")
thing_group_description2a.should.have.key("thingGroupMetadata")
thing_group_description2a["thingGroupMetadata"].should.have.length_of(3)
thing_group_description2a["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_1a)
thing_group_description2a["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description2a["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(1)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description2a.should.have.key("version")
# 2b
thing_group_description2b = client.describe_thing_group(
thingGroupName=group_name_2b
)
thing_group_description2b.should.have.key("thingGroupName").which.should.equal(
group_name_2b
)
thing_group_description2b.should.have.key("thingGroupProperties")
thing_group_description2b.should.have.key("thingGroupMetadata")
thing_group_description2b["thingGroupMetadata"].should.have.length_of(3)
thing_group_description2b["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_1a)
thing_group_description2b["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description2b["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(1)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description2b.should.have.key("version")
# groups level 3
# 3a
thing_group_description3a = client.describe_thing_group(
thingGroupName=group_name_3a
)
thing_group_description3a.should.have.key("thingGroupName").which.should.equal(
group_name_3a
)
thing_group_description3a.should.have.key("thingGroupProperties")
thing_group_description3a.should.have.key("thingGroupMetadata")
thing_group_description3a["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3a["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2a)
thing_group_description3a["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3a["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(thing_group2a["thingGroupArn"])
thing_group_description3a.should.have.key("version")
# 3b
thing_group_description3b = client.describe_thing_group(
thingGroupName=group_name_3b
)
thing_group_description3b.should.have.key("thingGroupName").which.should.equal(
group_name_3b
)
thing_group_description3b.should.have.key("thingGroupProperties")
thing_group_description3b.should.have.key("thingGroupMetadata")
thing_group_description3b["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3b["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2a)
thing_group_description3b["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3b["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(thing_group2a["thingGroupArn"])
thing_group_description3b.should.have.key("version")
# 3c
thing_group_description3c = client.describe_thing_group(
thingGroupName=group_name_3c
)
thing_group_description3c.should.have.key("thingGroupName").which.should.equal(
group_name_3c
)
thing_group_description3c.should.have.key("thingGroupProperties")
thing_group_description3c.should.have.key("thingGroupMetadata")
thing_group_description3c["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3c["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2b)
thing_group_description3c["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3c["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(thing_group2b["thingGroupArn"])
thing_group_description3c.should.have.key("version")
# 3d
thing_group_description3d = client.describe_thing_group(
thingGroupName=group_name_3d
)
thing_group_description3d.should.have.key("thingGroupName").which.should.equal(
group_name_3d
)
thing_group_description3d.should.have.key("thingGroupProperties")
thing_group_description3d.should.have.key("thingGroupMetadata")
thing_group_description3d["thingGroupMetadata"].should.have.length_of(3)
thing_group_description3d["thingGroupMetadata"].should.have.key(
"parentGroupName"
).being.equal(group_name_2b)
thing_group_description3d["thingGroupMetadata"].should.have.key(
"rootToParentThingGroups"
)
thing_group_description3d["thingGroupMetadata"][
"rootToParentThingGroups"
].should.have.length_of(2)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupName"
].should.match(group_name_1a)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(thing_group1a["thingGroupArn"])
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(thing_group2b["thingGroupArn"])
thing_group_description3d.should.have.key("version")
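# Convenience sketch (hypothetical; not part of the original tests): flatten
# the rootToParentThingGroups metadata asserted above into a readable path.
def _group_path(thing_group_description):
    metadata = thing_group_description["thingGroupMetadata"]
    parents = [g["groupName"] for g in metadata.get("rootToParentThingGroups", [])]
    return "/".join(parents + [thing_group_description["thingGroupName"]])
# e.g. _group_path(thing_group_description3d) would yield
# "my-group-name-1a/my-group-name-2b/my-group-name-3d".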
@mock_iot
def test_thing_groups():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name = "my-group-name"
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(1)
for thing_group in res["thingGroups"]:
thing_group.should.have.key("groupName").which.should_not.be.none
thing_group.should.have.key("groupArn").which.should_not.be.none
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupProperties")
thing_group.should.have.key("thingGroupMetadata")
thing_group.should.have.key("version")
# delete thing group
client.delete_thing_group(thingGroupName=group_name)
res = client.list_thing_groups()
res.should.have.key("thingGroups").which.should.have.length_of(0)
# props create test
props = {
"thingGroupDescription": "my first thing group",
"attributePayload": {"attributes": {"key1": "val01", "Key02": "VAL2"}},
}
thing_group = client.create_thing_group(
thingGroupName=group_name, thingGroupProperties=props
)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("key1").which.should.equal("val01")
res_props.should.have.key("Key02").which.should.equal("VAL2")
# props update test with merge
new_props = {"attributePayload": {"attributes": {"k3": "v3"}, "merge": True}}
client.update_thing_group(thingGroupName=group_name, thingGroupProperties=new_props)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("key1").which.should.equal("val01")
res_props.should.have.key("Key02").which.should.equal("VAL2")
res_props.should.have.key("k3").which.should.equal("v3")
# props update test
new_props = {"attributePayload": {"attributes": {"k4": "v4"}}}
client.update_thing_group(thingGroupName=group_name, thingGroupProperties=new_props)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupProperties").which.should.have.key(
"attributePayload"
).which.should.have.key("attributes")
res_props = thing_group["thingGroupProperties"]["attributePayload"]["attributes"]
res_props.should.have.key("k4").which.should.equal("v4")
res_props.should_not.have.key("key1")
@mock_iot
def test_thing_group_relations():
client = boto3.client("iot", region_name="ap-northeast-1")
name = "my-thing"
group_name = "my-group-name"
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key("thingGroupName").which.should.equal(group_name)
thing_group.should.have.key("thingGroupArn")
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
    # add the thing to the group using all four name/ARN argument combinations
client.add_thing_to_thing_group(thingGroupName=group_name, thingName=name)
client.add_thing_to_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingArn=thing["thingArn"]
)
client.add_thing_to_thing_group(
thingGroupName=group_name, thingArn=thing["thingArn"]
)
client.add_thing_to_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingName=name
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(1)
thing_groups = client.list_thing_groups_for_thing(thingName=name)
thing_groups.should.have.key("thingGroups")
thing_groups["thingGroups"].should.have.length_of(1)
    # remove the thing from the group using the same four argument combinations
client.remove_thing_from_thing_group(thingGroupName=group_name, thingName=name)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingArn=thing["thingArn"]
)
client.remove_thing_from_thing_group(
thingGroupName=group_name, thingArn=thing["thingArn"]
)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group["thingGroupArn"], thingName=name
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(0)
# update thing group for thing
client.update_thing_groups_for_thing(thingName=name, thingGroupsToAdd=[group_name])
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(1)
client.update_thing_groups_for_thing(
thingName=name, thingGroupsToRemove=[group_name]
)
things = client.list_things_in_thing_group(thingGroupName=group_name)
things.should.have.key("things")
things["things"].should.have.length_of(0)
@mock_iot
def test_create_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job.should.have.key("description")
@mock_iot
def test_describe_job():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("documentSource")
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobArn")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("job").which.should.have.key("targets")
job.should.have.key("job").which.should.have.key("jobProcessDetails")
job.should.have.key("job").which.should.have.key("lastUpdatedAt")
job.should.have.key("job").which.should.have.key("createdAt")
job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key("job").which.should.have.key(
"targetSelection"
).which.should.equal("CONTINUOUS")
job.should.have.key("job").which.should.have.key("presignedUrlConfig")
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("roleArn").which.should.equal(
"arn:aws:iam::1:role/service-role/iot_job_role"
)
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("expiresInSec").which.should.equal(123)
job.should.have.key("job").which.should.have.key(
"jobExecutionsRolloutConfig"
).which.should.have.key("maximumPerMinute").which.should.equal(10)
@mock_iot
def test_describe_job_1():
client = boto3.client("iot", region_name="eu-west-1")
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key("thingName").which.should.equal(name)
thing.should.have.key("thingArn")
# job document
job_document = {"field": "value"}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
presignedUrlConfig={
"roleArn": "arn:aws:iam::1:role/service-role/iot_job_role",
"expiresInSec": 123,
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={"maximumPerMinute": 10},
)
job.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("jobArn")
job = client.describe_job(jobId=job_id)
job.should.have.key("job")
job.should.have.key("job").which.should.have.key("jobArn")
job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key("job").which.should.have.key("targets")
job.should.have.key("job").which.should.have.key("jobProcessDetails")
job.should.have.key("job").which.should.have.key("lastUpdatedAt")
job.should.have.key("job").which.should.have.key("createdAt")
job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key("job").which.should.have.key(
"targetSelection"
).which.should.equal("CONTINUOUS")
job.should.have.key("job").which.should.have.key("presignedUrlConfig")
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("roleArn").which.should.equal(
"arn:aws:iam::1:role/service-role/iot_job_role"
)
job.should.have.key("job").which.should.have.key(
"presignedUrlConfig"
).which.should.have.key("expiresInSec").which.should.equal(123)
job.should.have.key("job").which.should.have.key(
"jobExecutionsRolloutConfig"
).which.should.have.key("maximumPerMinute").which.should.equal(10)
| 41.593611
| 100
| 0.71463
|
ccbfa75e4b65257a227efc6f6cc0b3b95a4ca9c4
| 1,855
|
py
|
Python
|
ch_11/src/dice_server.py
|
real-slim-chadi/Python-Object-Oriented-Programming---4th-edition
|
7c486866171786b620795fa33a79ec9ac9a8ba1b
|
[
"MIT"
] | 43
|
2021-06-03T18:39:09.000Z
|
2022-03-29T20:32:13.000Z
|
ch_11/src/dice_server.py
|
real-slim-chadi/Python-Object-Oriented-Programming---4th-edition
|
7c486866171786b620795fa33a79ec9ac9a8ba1b
|
[
"MIT"
] | 9
|
2022-03-12T01:04:07.000Z
|
2022-03-12T01:05:01.000Z
|
ch_11/src/dice_server.py
|
real-slim-chadi/Python-Object-Oriented-Programming---4th-edition
|
7c486866171786b620795fa33a79ec9ac9a8ba1b
|
[
"MIT"
] | 36
|
2021-06-19T07:14:09.000Z
|
2022-03-12T22:17:09.000Z
|
"""
Python 3 Object-Oriented Programming
Chapter 11. Common Design Patterns
"""
import contextlib
import dice
import gzip
import io
import socket
from typing import cast, Callable, Tuple
class ZipRoller:
def __init__(self, dice: Callable[[bytes], bytes]) -> None:
self.dice_roller = dice
def __call__(self, request: bytes) -> bytes:
dice_roller = self.dice_roller
response = dice_roller(request)
buffer = io.BytesIO()
with gzip.GzipFile(fileobj=buffer, mode="w") as zipfile:
zipfile.write(response)
return buffer.getvalue()
Address = Tuple[str, int]
class LogRoller:
def __init__(self, dice: Callable[[bytes], bytes], remote_addr: Address) -> None:
self.dice_roller = dice
self.remote_addr = remote_addr
def __call__(self, request: bytes) -> bytes:
print(f"Receiving {request!r} from {self.remote_addr}")
dice_roller = self.dice_roller
response = dice_roller(request)
print(f"Sending {response!r} to {self.remote_addr}")
return response
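# Both wrappers follow the Decorator pattern: each exposes the same
# Callable[[bytes], bytes] interface as the callable it wraps, so they
# compose freely, e.g. LogRoller(ZipRoller(dice.dice_roller), remote_addr).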
def dice_response(client: socket.socket) -> None:
request = client.recv(1024)
try:
remote_addr = client.getpeername()
roller_1 = ZipRoller(dice.dice_roller)
roller_2 = LogRoller(roller_1, remote_addr=remote_addr)
response = roller_2(request)
except (ValueError, KeyError) as ex:
response = repr(ex).encode("utf-8")
client.send(response)
def main_3() -> None:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("localhost", 2401))
server.listen(1)
with contextlib.closing(server):
while True:
client, addr = server.accept()
dice_response(client)
client.close()
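# Minimal client sketch (an illustrative assumption; the exact request format
# the dice module expects is not shown here): send one request and gunzip the
# decorated response.
def demo_client(request: bytes = b"1d6") -> bytes:
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(("localhost", 2401))
    with contextlib.closing(client):
        client.send(request)
        return gzip.decompress(client.recv(1024))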
if __name__ == "__main__":
main_3()
| 25.763889
| 85
| 0.657682
|
07f07429e7a9732a21447b74c7d94d5ba497971f
| 5,533
|
py
|
Python
|
BlazeRAT/core/messages.py
|
swagkarna/BlazeRAT
|
fa711ebabde6b1bfa6b78480ac68212ded6deff5
|
[
"Apache-2.0"
] | 116
|
2020-09-01T16:55:24.000Z
|
2022-03-25T02:35:10.000Z
|
BlazeRAT/core/messages.py
|
MLX15/BlazeRAT
|
95b8fd903b553c2b2ad8c92907f993e33227d4e7
|
[
"Apache-2.0"
] | 1
|
2021-06-13T15:04:03.000Z
|
2021-06-13T15:05:51.000Z
|
BlazeRAT/core/messages.py
|
MLX15/BlazeRAT
|
95b8fd903b553c2b2ad8c92907f993e33227d4e7
|
[
"Apache-2.0"
] | 43
|
2020-09-02T14:42:30.000Z
|
2022-03-16T03:45:06.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Author : LimerBoy
github.com/LimerBoy/BlazeRAT
Notes:
    This file stores all user-facing text so that,
    if necessary, it can be quickly translated into other languages.
"""
""" Authorization messages """
class auth:
incorrect_token = "⛔️ Incorrect token, attempts left %i"
user_authorized = "🔑 User %s authorized as '%s',\nsession will expire at %s"
already_authorized = "⚠️ You are already authorized!"
not_authorized = "⛔️ Access denied, you need to authorize!"
permission_not_found = "⚠️ You don't have permissions to do this!"
user_deauthorized = "☠️ User %s deauthorized"
user_is_banned = "💣 Account is banned, reason '%s'"
""" Services messages """
class services:
# Desktop screenshot
desktop_screenshot_captured = "🌃 Desktop screenshot taken"
# Webcam screenshot
webcam_screenshot_captured = "📹 Webcam screenshot taken"
webcam_screenshot_button = "📹 Take screenshot"
webcam_start_recording_button = "▶️ Start video recording"
webcam_stop_recording_button = "⏹ Stop video recording"
webcam_select_action = "🎥 Select an action...\nDevice index is %i"
webcam_recording_started = "📸 Webcam recording started"
webcam_recording_stopped = "📷 Webcam recording stopped"
webcam_recording_not_started = "📷 Unable to stop recording because it was not started!"
webcam_recording_not_stopped = "📸 It is impossible to start recording as it is already started!"
webcam_failed_open = "📷 Failed to open webcam %i"
# System audio volume control
volume_get_level_button = "🔉 Current level is %i"
volume_set_level_button = "🔊 Set %i"
volume_get_level = "🔉 Current volume level is %i"
volume_set_level = "🔊 Changed volume level to %i"
    # Microphone recorder
microphone_start_recording_button = "▶️ Start recording"
microphone_stop_recording_button = "⏹ Stop recording"
microphone_select_action = "🎤 Select an action..."
microphone_recording_started = "🎙 Recording started"
microphone_recording_stopped = "🎙 Recording stopped"
microphone_recording_not_started = "🎤 Unable to stop recording because it was not started!"
microphone_recording_not_stopped = "🎤 It is impossible to start recording as it is already started!"
# Keylogger controls
keylogger_start_recording_button = "▶️ Start logger"
keylogger_stop_recording_button = "⏹ Stop logger"
keylogger_get_logs_button = "⌨️ Retrieve logs"
keylogger_clean_logs_button = "♻️️ Clean logs"
keylogger_logs_received = "📄 Here is keylogger logs"
keylogger_logs_cleaned = "🚮 Keylogger logs cleaned"
keylogger_recording_started = "⌨️ Keylogger started"
keylogger_recording_stopped = "⌨️ Keylogger stopped"
keylogger_recording_not_started = "⁉️Unable to stop keylogger because it was not started!"
keylogger_recording_not_stopped = "⁉️It is impossible to start keylogger as it is already started!"
# Power controls
power_control = "🔋 Select power command:"
power_received = "🔋 Power event %s received"
power_shutdown = "🔻 Shutdown"
power_suspend = "🔻 Suspend"
power_reboot = "🔻 Reboot"
power_logout = "🔻 Log out"
# Location
location_success = "🗺 Location:\n\tLatitude: %f\n\tLongitude: %f\n\tRange: %i\n\tAddress: \"%s\"\n\n📡 %s"
location_gateway_detection_failed = "📡 Failed to get default gateway!"
location_arp_request_failed = "📡 Failed to get gateway mac address!"
location_api_request_failed = "📡 Failed to make API request!"
# Shell commands
shell_session_opened = "⚙️ Terminal session opened"
shell_session_closed = "⚙️ Terminal session closed"
shell_command_is_empty = "🐚 Input command is empty!"
shell_command_executed = "🐚 System command executed.\n%s"
shell_pwd_success = "📂 Current directory is:\n%s"
shell_chdir_success = "📂 Current directory changed to:\n%s"
shell_chdir_not_found = "📂 Directory not found:\n%s"
shell_chdir_not_a_dir = "📂 Not a directory:\n%s"
shell_chdir_failed = "📂 (%s)\nFailed to change directory to:\n%s"
# Process manager
taskmanager_process_list = "⚙ Taskmanager (%s) running %i processes:"
taskmanager_process_kill_success = "🔫 Process %s (%i) killed"
taskmanager_process_kill_failed = "🔫 Failed to kill process %i, error:\n%s"
# Wipe browsers data
wipe_files_count = "🧨 %i files will be deleted beyond recovery"
wipe_confirm = "♻️ Do you want to clean browsers data?"
wipe_agree = "✅ Wipe all data"
wipe_disagree = "🛑 NO!"
wipe_cancelled = "✅ Wipe cancelled"
wipe_removed = "🗑 Removed %i files from system"
# Installation
stub_install = "👻 Installing service..."
stub_uninstall = "🗑 Uninstalling service..."
""" File transfer and filemanager """
class file:
upload_path_not_found = "📄 File %s not found!"
download_file_success = "📄 File %s saved"
start_file_success = "📄 Start file:\n%s"
remove_directory_success = "🗑 Directory removed:\n%s"
remove_directory_failed = "🗑 (%s)\nFailed to remove directory:\n%s"
remove_file_success = "🗑 File removed:\n%s"
remove_file_failed = "🗑 (%s)\nFailed to remove file:\n%s"
""" User messages """
class user:
name_anonymous = "Anonymous"
help = ("""
🔑 *[Auth]*
/authorize <token>
/deauthorize
/permissions
🗃 *[Files]*
/download <file/dir>
/filemanager
👁🗨 *[Spying]*
/location
/keylogger
/information
/webcam <device>
/screenshot
/microphone
🐚 *[System]*
/taskmanager
/uninstall
/keyboard
/volume
/power
/shell
/wipe
""")
| 40.094203
| 109
| 0.706488
|
73be33e24384a63c06a8597a2196de806f066b80
| 13,483
|
py
|
Python
|
sdk/python/pulumi_google_native/cloudidentity/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/cloudidentity/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/cloudidentity/v1beta1/_inputs.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'DynamicGroupMetadataArgs',
'DynamicGroupQueryArgs',
'EntityKeyArgs',
'ExpiryDetailArgs',
'MembershipRoleRestrictionEvaluationArgs',
'MembershipRoleArgs',
'PosixGroupArgs',
'RestrictionEvaluationsArgs',
]
@pulumi.input_type
class DynamicGroupMetadataArgs:
def __init__(__self__, *,
queries: Optional[pulumi.Input[Sequence[pulumi.Input['DynamicGroupQueryArgs']]]] = None):
"""
Dynamic group metadata like queries and status.
:param pulumi.Input[Sequence[pulumi.Input['DynamicGroupQueryArgs']]] queries: Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
"""
if queries is not None:
pulumi.set(__self__, "queries", queries)
@property
@pulumi.getter
def queries(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DynamicGroupQueryArgs']]]]:
"""
Memberships will be the union of all queries. Only one entry with USER resource is currently supported. Customers can create up to 100 dynamic groups.
"""
return pulumi.get(self, "queries")
@queries.setter
def queries(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DynamicGroupQueryArgs']]]]):
pulumi.set(self, "queries", value)
@pulumi.input_type
class DynamicGroupQueryArgs:
def __init__(__self__, *,
query: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input['DynamicGroupQueryResourceType']] = None):
"""
Defines a query on a resource.
:param pulumi.Input[str] query: Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`
"""
if query is not None:
pulumi.set(__self__, "query", query)
if resource_type is not None:
pulumi.set(__self__, "resource_type", resource_type)
@property
@pulumi.getter
def query(self) -> Optional[pulumi.Input[str]]:
"""
Query that determines the memberships of the dynamic group. Examples: All users with at least one `organizations.department` of engineering. `user.organizations.exists(org, org.department=='engineering')` All users with at least one location that has `area` of `foo` and `building_id` of `bar`. `user.locations.exists(loc, loc.area=='foo' && loc.building_id=='bar')` All users with any variation of the name John Doe (case-insensitive queries add `equalsIgnoreCase()` to the value being queried). `user.name.value.equalsIgnoreCase('jOhn DoE')`
"""
return pulumi.get(self, "query")
@query.setter
def query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query", value)
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> Optional[pulumi.Input['DynamicGroupQueryResourceType']]:
return pulumi.get(self, "resource_type")
@resource_type.setter
def resource_type(self, value: Optional[pulumi.Input['DynamicGroupQueryResourceType']]):
pulumi.set(self, "resource_type", value)
@pulumi.input_type
class EntityKeyArgs:
def __init__(__self__, *,
id: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None):
"""
A unique identifier for an entity in the Cloud Identity Groups API. An entity can represent either a group with an optional `namespace` or a user without a `namespace`. The combination of `id` and `namespace` must be unique; however, the same `id` can be used with different `namespace`s.
:param pulumi.Input[str] id: The ID of the entity. For Google-managed entities, the `id` must be the email address of an existing group or user. For external-identity-mapped entities, the `id` must be a string conforming to the Identity Source's requirements. Must be unique within a `namespace`.
:param pulumi.Input[str] namespace: The namespace in which the entity exists. If not specified, the `EntityKey` represents a Google-managed entity such as a Google user or a Google Group. If specified, the `EntityKey` represents an external-identity-mapped group. The namespace must correspond to an identity source created in Admin Console and must be in the form of `identitysources/{identity_source_id}`.
"""
if id is not None:
pulumi.set(__self__, "id", id)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the entity. For Google-managed entities, the `id` must be the email address of an existing group or user. For external-identity-mapped entities, the `id` must be a string conforming to the Identity Source's requirements. Must be unique within a `namespace`.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
The namespace in which the entity exists. If not specified, the `EntityKey` represents a Google-managed entity such as a Google user or a Google Group. If specified, the `EntityKey` represents an external-identity-mapped group. The namespace must correspond to an identity source created in Admin Console and must be in the form of `identitysources/{identity_source_id}`.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class ExpiryDetailArgs:
def __init__(__self__, *,
expire_time: Optional[pulumi.Input[str]] = None):
"""
The `MembershipRole` expiry details.
:param pulumi.Input[str] expire_time: The time at which the `MembershipRole` will expire.
"""
if expire_time is not None:
pulumi.set(__self__, "expire_time", expire_time)
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> Optional[pulumi.Input[str]]:
"""
The time at which the `MembershipRole` will expire.
"""
return pulumi.get(self, "expire_time")
@expire_time.setter
def expire_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expire_time", value)
@pulumi.input_type
class MembershipRoleRestrictionEvaluationArgs:
def __init__(__self__):
"""
The evaluated state of this restriction.
"""
pass
@pulumi.input_type
class MembershipRoleArgs:
def __init__(__self__, *,
expiry_detail: Optional[pulumi.Input['ExpiryDetailArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
restriction_evaluations: Optional[pulumi.Input['RestrictionEvaluationsArgs']] = None):
"""
A membership role within the Cloud Identity Groups API. A `MembershipRole` defines the privileges granted to a `Membership`.
:param pulumi.Input['ExpiryDetailArgs'] expiry_detail: The expiry details of the `MembershipRole`. Expiry details are only supported for `MEMBER` `MembershipRoles`. May be set if `name` is `MEMBER`. Must not be set if `name` is any other value.
:param pulumi.Input[str] name: The name of the `MembershipRole`. Must be one of `OWNER`, `MANAGER`, `MEMBER`.
:param pulumi.Input['RestrictionEvaluationsArgs'] restriction_evaluations: Evaluations of restrictions applied to parent group on this membership.
"""
if expiry_detail is not None:
pulumi.set(__self__, "expiry_detail", expiry_detail)
if name is not None:
pulumi.set(__self__, "name", name)
if restriction_evaluations is not None:
pulumi.set(__self__, "restriction_evaluations", restriction_evaluations)
@property
@pulumi.getter(name="expiryDetail")
def expiry_detail(self) -> Optional[pulumi.Input['ExpiryDetailArgs']]:
"""
The expiry details of the `MembershipRole`. Expiry details are only supported for `MEMBER` `MembershipRoles`. May be set if `name` is `MEMBER`. Must not be set if `name` is any other value.
"""
return pulumi.get(self, "expiry_detail")
@expiry_detail.setter
def expiry_detail(self, value: Optional[pulumi.Input['ExpiryDetailArgs']]):
pulumi.set(self, "expiry_detail", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the `MembershipRole`. Must be one of `OWNER`, `MANAGER`, `MEMBER`.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="restrictionEvaluations")
def restriction_evaluations(self) -> Optional[pulumi.Input['RestrictionEvaluationsArgs']]:
"""
Evaluations of restrictions applied to parent group on this membership.
"""
return pulumi.get(self, "restriction_evaluations")
@restriction_evaluations.setter
def restriction_evaluations(self, value: Optional[pulumi.Input['RestrictionEvaluationsArgs']]):
pulumi.set(self, "restriction_evaluations", value)
@pulumi.input_type
class PosixGroupArgs:
def __init__(__self__, *,
gid: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
system_id: Optional[pulumi.Input[str]] = None):
"""
POSIX Group definition to represent a group in a POSIX compliant system.
:param pulumi.Input[str] gid: GID of the POSIX group.
:param pulumi.Input[str] name: Name of the POSIX group.
:param pulumi.Input[str] system_id: System identifier for which group name and gid apply to. If not specified it will default to empty value.
"""
if gid is not None:
pulumi.set(__self__, "gid", gid)
if name is not None:
pulumi.set(__self__, "name", name)
if system_id is not None:
pulumi.set(__self__, "system_id", system_id)
@property
@pulumi.getter
def gid(self) -> Optional[pulumi.Input[str]]:
"""
GID of the POSIX group.
"""
return pulumi.get(self, "gid")
@gid.setter
def gid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gid", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the POSIX group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="systemId")
def system_id(self) -> Optional[pulumi.Input[str]]:
"""
System identifier for which group name and gid apply to. If not specified it will default to empty value.
"""
return pulumi.get(self, "system_id")
@system_id.setter
def system_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "system_id", value)
@pulumi.input_type
class RestrictionEvaluationsArgs:
def __init__(__self__, *,
member_restriction_evaluation: Optional[pulumi.Input['MembershipRoleRestrictionEvaluationArgs']] = None):
"""
Evaluations of restrictions applied to parent group on this membership.
:param pulumi.Input['MembershipRoleRestrictionEvaluationArgs'] member_restriction_evaluation: Evaluation of the member restriction applied to this membership. Empty if the user lacks permission to view the restriction evaluation.
"""
if member_restriction_evaluation is not None:
pulumi.set(__self__, "member_restriction_evaluation", member_restriction_evaluation)
@property
@pulumi.getter(name="memberRestrictionEvaluation")
def member_restriction_evaluation(self) -> Optional[pulumi.Input['MembershipRoleRestrictionEvaluationArgs']]:
"""
Evaluation of the member restriction applied to this membership. Empty if the user lacks permission to view the restriction evaluation.
"""
return pulumi.get(self, "member_restriction_evaluation")
@member_restriction_evaluation.setter
def member_restriction_evaluation(self, value: Optional[pulumi.Input['MembershipRoleRestrictionEvaluationArgs']]):
pulumi.set(self, "member_restriction_evaluation", value)
| 46.174658
| 583
| 0.680857
|
77936992548baca400e4bd814a2829fd8b1ccb05
| 7,922
|
py
|
Python
|
marge/single_merge_job.py
|
bgamari/marge-bot
|
9297e21d07e32b4bdad08e1df3b41cdae78930f1
|
[
"BSD-3-Clause"
] | null | null | null |
marge/single_merge_job.py
|
bgamari/marge-bot
|
9297e21d07e32b4bdad08e1df3b41cdae78930f1
|
[
"BSD-3-Clause"
] | 1
|
2019-02-06T21:54:55.000Z
|
2019-02-06T21:56:29.000Z
|
marge/single_merge_job.py
|
bgamari/marge-bot
|
9297e21d07e32b4bdad08e1df3b41cdae78930f1
|
[
"BSD-3-Clause"
] | 1
|
2019-02-06T21:23:53.000Z
|
2019-02-06T21:23:53.000Z
|
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
import logging as log
import time
from datetime import datetime
from . import git, gitlab
from .commit import Commit
from .job import CannotMerge, MergeJob, SkipMerge
from .project import Project
class SingleMergeJob(MergeJob):
def __init__(self, *, api, user, project, repo, options, merge_request):
super().__init__(api=api, user=user, project=project, repo=repo, options=options)
self._merge_request = merge_request
def execute(self):
merge_request = self._merge_request
log.info('Processing !%s - %r', merge_request.iid, merge_request.title)
try:
approvals = merge_request.fetch_approvals()
self.update_merge_request_and_accept(approvals)
log.info('Successfully merged !%s.', merge_request.info['iid'])
except SkipMerge as err:
log.warning("Skipping MR !%s: %s", merge_request.info['iid'], err.reason)
except CannotMerge as err:
message = "I couldn't merge this branch: %s" % err.reason
log.warning(message)
self.unassign_from_mr(merge_request)
merge_request.comment(message)
except git.GitError:
log.exception('Unexpected Git error')
merge_request.comment('Something seems broken on my local git repo; check my logs!')
raise
except Exception:
log.exception('Unexpected Exception')
merge_request.comment("I'm broken on the inside, please somebody fix me... :cry:")
self.unassign_from_mr(merge_request)
raise
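    # The method below loops until the merge request has landed on an
    # up-to-date target branch: update/rebase, re-check CI, try to accept,
    # and retry whenever the target branch moved underneath us.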
def update_merge_request_and_accept(self, approvals):
api = self._api
merge_request = self._merge_request
target_project = Project.fetch_by_id(merge_request.target_project_id, api=api)
updated_into_up_to_date_target_branch = False
while not updated_into_up_to_date_target_branch:
if target_project.only_allow_merge_if_pipeline_succeeds:
self.wait_for_ci_to_pass(merge_request, merge_request.sha)
time.sleep(2)
self.ensure_mergeable_mr(merge_request)
source_project, source_repo_url, _ = self.fetch_source_project(merge_request)
# NB. this will be a no-op if there is nothing to update/rewrite
target_sha, _updated_sha, actual_sha = self.update_from_target_branch_and_push(
merge_request,
source_repo_url=source_repo_url,
)
log.info('Commit id to merge %r (into: %r)', actual_sha, target_sha)
time.sleep(5)
sha_now = Commit.last_on_branch(source_project.id, merge_request.source_branch, api).id
# Make sure no-one managed to race and push to the branch in the
# meantime, because we're about to impersonate the approvers, and
# we don't want to approve unreviewed commits
if sha_now != actual_sha:
raise CannotMerge('Someone pushed to branch while we were trying to merge')
self.maybe_reapprove(merge_request, approvals)
if target_project.only_allow_merge_if_pipeline_succeeds:
self.wait_for_ci_to_pass(merge_request, actual_sha)
time.sleep(2)
self.ensure_mergeable_mr(merge_request)
try:
merge_request.accept(remove_branch=True, sha=actual_sha)
except gitlab.NotAcceptable as err:
new_target_sha = Commit.last_on_branch(self._project.id, merge_request.target_branch, api).id
# target_branch has moved under us since we updated, just try again
if new_target_sha != target_sha:
log.info('Someone was naughty and by-passed marge')
merge_request.comment(
"My job would be easier if people didn't jump the queue and push directly... *sigh*"
)
continue
                # Otherwise, the source branch has been pushed to, or something
                # unexpected went wrong. In either case we expect the user to
                # explicitly re-assign to marge (after resolving any potential
                # problems).
raise CannotMerge('Merge request was rejected by GitLab: %r' % err.error_message)
except gitlab.Unauthorized:
log.warning('Unauthorized!')
raise CannotMerge('My user cannot accept merge requests!')
except gitlab.NotFound as ex:
log.warning('Not Found!: %s', ex)
merge_request.refetch_info()
if merge_request.state == 'merged':
# someone must have hit "merge when build succeeds" and we lost the race,
# the branch is gone and we got a 404. Anyway, our job here is done.
# (see #33)
updated_into_up_to_date_target_branch = True
else:
log.warning('For the record, merge request state is %r', merge_request.state)
raise
except gitlab.MethodNotAllowed as ex:
log.warning('Not Allowed!: %s', ex)
merge_request.refetch_info()
if merge_request.work_in_progress:
raise CannotMerge(
'The request was marked as WIP as I was processing it (maybe a WIP commit?)'
)
elif merge_request.state == 'reopened':
raise CannotMerge(
'GitLab refused to merge this branch. I suspect that a Push Rule or a git-hook '
'is rejecting my commits; maybe my email needs to be white-listed?'
)
elif merge_request.state == 'closed':
raise CannotMerge('Someone closed the merge request while I was attempting to merge it.')
elif merge_request.state == 'merged':
# We are not covering any observed behaviour here, but if at this
# point the request is merged, our job is done, so no need to complain
log.info('Merge request is already merged, someone was faster!')
updated_into_up_to_date_target_branch = True
else:
raise CannotMerge(
"Gitlab refused to merge this request and I don't know why!" + (
" Maybe you have unresolved discussions?"
if self._project.only_allow_merge_if_all_discussions_are_resolved else ""
)
)
except gitlab.ApiError:
log.exception('Unanticipated ApiError from GitLab on merge attempt')
raise CannotMerge('had some issue with GitLab, check my logs...')
else:
self.wait_for_branch_to_be_merged()
updated_into_up_to_date_target_branch = True
def wait_for_branch_to_be_merged(self):
merge_request = self._merge_request
time_0 = datetime.utcnow()
waiting_time_in_secs = 10
while datetime.utcnow() - time_0 < self._merge_timeout:
merge_request.refetch_info()
if merge_request.state == 'merged':
return # success!
if merge_request.state == 'closed':
raise CannotMerge('someone closed the merge request while merging!')
assert merge_request.state in ('opened', 'reopened', 'locked'), merge_request.state
log.info('Giving %s more secs for !%s to be merged...', waiting_time_in_secs, merge_request.iid)
time.sleep(waiting_time_in_secs)
raise CannotMerge('It is taking too long to see the request marked as merged!')
| 49.204969
| 109
| 0.606917
|
549641fb37dc6b79358740c1dd3565defc3bb858
| 7,483
|
py
|
Python
|
core/source/ruattitudes/reader.py
|
nicolay-r/attitudes-extraction-ds
|
49a82843e6adbca35321aaaa08d05532e953a0fc
|
[
"MIT"
] | 3
|
2019-11-05T19:42:55.000Z
|
2021-02-25T12:14:17.000Z
|
core/source/ruattitudes/reader.py
|
nicolay-r/attitudes-extraction-ds
|
49a82843e6adbca35321aaaa08d05532e953a0fc
|
[
"MIT"
] | 1
|
2021-03-30T07:41:22.000Z
|
2021-03-30T07:41:22.000Z
|
core/source/ruattitudes/reader.py
|
nicolay-r/attitudes-extraction-ds
|
49a82843e6adbca35321aaaa08d05532e953a0fc
|
[
"MIT"
] | 1
|
2019-08-18T18:33:11.000Z
|
2019-08-18T18:33:11.000Z
|
# -*- coding: utf-8 -*-
from core.evaluation.labels import Label
from core.processing.lemmatization.base import Stemmer
from core.runtime.object import TextObject
from core.runtime.parser import TextParser
from core.runtime.ref_opinon import RefOpinion
from core.source.ruattitudes.news import ProcessedNews
from core.source.ruattitudes.sentence import ProcessedSentence
# TODO. Rename as RuAttitudesFormatReader.
class ContextsReader(object):
NEWS_SEP_KEY = u'--------'
FILE_KEY = u"Файл:"
OBJ_KEY = u"Oбъект:"
TITLE_KEY = u"Заголовок:"
SINDEX_KEY = u"Предложение:"
OPINION_KEY = u"Отношение:"
STEXT_KEY = u"Текст:"
TEXT_IND_KEY = u"Номер:"
TERMS_IN_TITLE = u"Термов в заголовке:"
TERMS_IN_TEXT = u"Термов в тексте:"
FRAMEVAR_TITLE = u"Вариант фрейма:"
def __iter__(self):
pass
@staticmethod
def iter_processed_news(filepath, stemmer=None):
assert(isinstance(filepath, unicode))
assert(isinstance(stemmer, Stemmer) or stemmer is None)
reset = False
title = None
title_terms_count = None
text_terms_count = None
processed_sentences = []
opinions_list = []
objects_list = []
s_index = 0
news_index = None
with open(filepath, 'r') as input:
for line in input.readlines():
line = line.decode('utf-8')
if ContextsReader.FILE_KEY in line:
pass
if ContextsReader.OBJ_KEY in line:
object = ContextsReader.__parse_object(line)
objects_list.append(object)
if ContextsReader.OPINION_KEY in line:
opinion = ContextsReader.__parse_opinion(line, objects_list=objects_list)
opinions_list.append(opinion)
if ContextsReader.FRAMEVAR_TITLE in line:
pass
if ContextsReader.TERMS_IN_TITLE in line:
title_terms_count = ContextsReader.__parse_terms_in_title_count(line)
if ContextsReader.TERMS_IN_TEXT in line:
text_terms_count = ContextsReader.__parse_terms_in_text_count(line)
if ContextsReader.SINDEX_KEY in line:
s_index = ContextsReader.__parse_sentence_index(line)
if ContextsReader.TEXT_IND_KEY in line:
news_index = ContextsReader.__parse_text_index(line)
if ContextsReader.TITLE_KEY in line:
title = ProcessedSentence(is_title=True,
parsed_text=ContextsReader.__parse_sentence(line,
is_title=True,
stemmer=stemmer),
ref_opinions=opinions_list,
objects_list=objects_list,
sentence_index=-1)
processed_sentences.append(title)
assert(title_terms_count == len(title.ParsedText) or title_terms_count is None)
reset = True
if ContextsReader.STEXT_KEY in line:
sentence = ProcessedSentence(is_title=False,
parsed_text=ContextsReader.__parse_sentence(line,
is_title=False,
stemmer=stemmer),
ref_opinions=opinions_list,
objects_list=objects_list,
sentence_index=s_index)
processed_sentences.append(sentence)
assert(text_terms_count == len(sentence.ParsedText) or text_terms_count is None)
reset = True
if ContextsReader.NEWS_SEP_KEY in line and title is not None:
yield ProcessedNews(processed_sentences=processed_sentences,
news_index=news_index)
processed_sentences = []
reset = True
if reset:
opinions_list = []
objects_list = []
title_terms_count = None
reset = False
if len(processed_sentences) > 0:
yield ProcessedNews(processed_sentences=processed_sentences,
news_index=news_index)
processed_sentences = []
assert(len(processed_sentences) == 0)
@staticmethod
def __parse_opinion(line, objects_list):
assert(isinstance(objects_list, list))
line = line[len(ContextsReader.OPINION_KEY):]
s_from = line.index(u'b:(')
s_to = line.index(u')', s_from)
label = Label.from_int(int(line[s_from+3:s_to]))
o_from = line.index(u'oi:[')
o_to = line.index(u']', o_from)
left_object_id, right_object_id = line[o_from+4:o_to].split(u',')
left_object_id = int(left_object_id)
right_object_id = int(right_object_id)
ref_opinion = RefOpinion(left_index=left_object_id,
right_index=right_object_id,
sentiment=label,
owner=objects_list)
s_from = line.index(u'si:{')
s_to = line.index(u'}', s_from)
        opinion_key = line[s_from+4:s_to]
        ref_opinion.set_tag(opinion_key)
return ref_opinion
@staticmethod
def __parse_object(line):
assert(isinstance(line, unicode))
line = line[len(ContextsReader.OBJ_KEY):]
o_begin = line.index(u"'", 0)
o_end = line.index(u"'", o_begin + 1)
b_from = line.index(u'b:(')
b_to = line.index(u')', b_from)
term_index, length = line[b_from+3:b_to].split(u',')
terms = line[o_begin+1:o_end].split(u',')
text_object = TextObject(terms=terms, position=int(term_index))
sg_from = line.index(u'si:{')
sg_to = line.index(u'}', sg_from)
group_index = int(line[sg_from+4:sg_to])
text_object.set_tag(group_index)
return text_object
@staticmethod
def __parse_sentence(line, is_title, stemmer):
assert(isinstance(is_title, bool))
assert(isinstance(stemmer, Stemmer) or stemmer is None)
key = ContextsReader.STEXT_KEY if not is_title else ContextsReader.TITLE_KEY
text = line[len(key):]
text = text.strip()
return TextParser.from_string(str=text, stemmer=stemmer)
@staticmethod
def __parse_terms_in_title_count(line):
line = line[len(ContextsReader.TERMS_IN_TITLE):]
return int(line)
@staticmethod
def __parse_terms_in_text_count(line):
line = line[len(ContextsReader.TERMS_IN_TEXT):]
return int(line)
@staticmethod
def __parse_sentence_index(line):
line = line[len(ContextsReader.SINDEX_KEY):]
return int(line)
@staticmethod
def __parse_text_index(line):
line = line[len(ContextsReader.TEXT_IND_KEY):]
return int(line)
| 37.603015
| 110
| 0.555927
|
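A minimal usage sketch for the reader above (Python 2, since the code relies on the unicode type and byte decoding; the dump path below is hypothetical, and the optional stemmer is left as None):
# -*- coding: utf-8 -*-
from core.source.ruattitudes.reader import ContextsReader
# Count the news items found in a RuAttitudes-format dump.
count = 0
for news in ContextsReader.iter_processed_news(u'/path/to/ruattitudes_dump.txt'):
    count += 1
print(count)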
876a6b29f51914860343a931c850722c1331f9a1
| 62,353
|
py
|
Python
|
src/gitchangelog/gitchangelog.py
|
dfugate/gitchangelog
|
7da12ad2efd0bbac412cf6ea75c08a1c9c9f1ab2
|
[
"BSD-3-Clause"
] | null | null | null |
src/gitchangelog/gitchangelog.py
|
dfugate/gitchangelog
|
7da12ad2efd0bbac412cf6ea75c08a1c9c9f1ab2
|
[
"BSD-3-Clause"
] | null | null | null |
src/gitchangelog/gitchangelog.py
|
dfugate/gitchangelog
|
7da12ad2efd0bbac412cf6ea75c08a1c9c9f1ab2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import locale
import re
import os
import os.path
import sys
import glob
import textwrap
import datetime
import collections
import traceback
import contextlib
import itertools
import errno
from subprocess import Popen, PIPE
try:
import pystache
except ImportError: ## pragma: no cover
pystache = None
try:
import mako
except ImportError: ## pragma: no cover
mako = None
__version__ = "%%version%%" ## replaced by autogen.sh
DEBUG = None
##
## Platform and python compatibility
##
PY_VERSION = float("%d.%d" % sys.version_info[0:2])
PY3 = PY_VERSION >= 3
try:
basestring
except NameError:
basestring = str ## pylint: disable=redefined-builtin
WIN32 = sys.platform == 'win32'
if WIN32:
PLT_CFG = {
'close_fds': False,
}
else:
PLT_CFG = {
'close_fds': True,
}
##
##
##
if WIN32 and not PY3:
    ## Sorry about the following; all this code is needed to ensure full
    ## compatibility with Python 2.7 under Windows when sending unicode
    ## command lines
import ctypes
import subprocess
import _subprocess
from ctypes import byref, windll, c_char_p, c_wchar_p, c_void_p, \
Structure, sizeof, c_wchar, WinError
from ctypes.wintypes import BYTE, WORD, LPWSTR, BOOL, DWORD, LPVOID, \
HANDLE
##
## Types
##
CREATE_UNICODE_ENVIRONMENT = 0x00000400
LPCTSTR = c_char_p
LPTSTR = c_wchar_p
LPSECURITY_ATTRIBUTES = c_void_p
LPBYTE = ctypes.POINTER(BYTE)
class STARTUPINFOW(Structure):
_fields_ = [
("cb", DWORD), ("lpReserved", LPWSTR),
("lpDesktop", LPWSTR), ("lpTitle", LPWSTR),
("dwX", DWORD), ("dwY", DWORD),
("dwXSize", DWORD), ("dwYSize", DWORD),
("dwXCountChars", DWORD), ("dwYCountChars", DWORD),
("dwFillAtrribute", DWORD), ("dwFlags", DWORD),
("wShowWindow", WORD), ("cbReserved2", WORD),
("lpReserved2", LPBYTE), ("hStdInput", HANDLE),
("hStdOutput", HANDLE), ("hStdError", HANDLE),
]
LPSTARTUPINFOW = ctypes.POINTER(STARTUPINFOW)
class PROCESS_INFORMATION(Structure):
_fields_ = [
("hProcess", HANDLE), ("hThread", HANDLE),
("dwProcessId", DWORD), ("dwThreadId", DWORD),
]
LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION)
class DUMMY_HANDLE(ctypes.c_void_p):
def __init__(self, *a, **kw):
super(DUMMY_HANDLE, self).__init__(*a, **kw)
self.closed = False
def Close(self):
if not self.closed:
windll.kernel32.CloseHandle(self)
self.closed = True
def __int__(self):
return self.value
CreateProcessW = windll.kernel32.CreateProcessW
CreateProcessW.argtypes = [
LPCTSTR, LPTSTR, LPSECURITY_ATTRIBUTES,
LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCTSTR,
LPSTARTUPINFOW, LPPROCESS_INFORMATION,
]
CreateProcessW.restype = BOOL
##
## Patched functions/classes
##
def CreateProcess(executable, args, _p_attr, _t_attr,
inherit_handles, creation_flags, env, cwd,
startup_info):
"""Create a process supporting unicode executable and args for win32
Python implementation of CreateProcess using CreateProcessW for Win32
"""
si = STARTUPINFOW(
dwFlags=startup_info.dwFlags,
wShowWindow=startup_info.wShowWindow,
cb=sizeof(STARTUPINFOW),
## XXXvlab: not sure of the casting here to ints.
hStdInput=int(startup_info.hStdInput),
hStdOutput=int(startup_info.hStdOutput),
hStdError=int(startup_info.hStdError),
)
wenv = None
if env is not None:
## LPCWSTR seems to be c_wchar_p, so let's say CWSTR is c_wchar
env = (unicode("").join([
unicode("%s=%s\0") % (k, v)
for k, v in env.items()])) + unicode("\0")
wenv = (c_wchar * len(env))()
wenv.value = env
pi = PROCESS_INFORMATION()
creation_flags |= CREATE_UNICODE_ENVIRONMENT
if CreateProcessW(executable, args, None, None,
inherit_handles, creation_flags,
wenv, cwd, byref(si), byref(pi)):
return (DUMMY_HANDLE(pi.hProcess), DUMMY_HANDLE(pi.hThread),
pi.dwProcessId, pi.dwThreadId)
raise WinError()
    class Popen(subprocess.Popen):
        """This supersedes Popen and corrects a bug in the CPython 2.7 implementation"""
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Code from part of _execute_child from Python 2.7 (9fbb65e)
            There are only 2 small changes concerning the construction of
            the final string in shell mode: we preempt the creation of
            the command string when shell is True, because the original
            function would try to encode unicode args, which we want to
            avoid so that we can send them as-is to ``CreateProcess``.
"""
if not isinstance(args, subprocess.types.StringTypes):
args = subprocess.list2cmdline(args)
if startupinfo is None:
startupinfo = subprocess.STARTUPINFO()
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", unicode("cmd.exe"))
args = unicode('{} /c "{}"').format(comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
w9xpopen = self._find_w9xpopen()
args = unicode('"%s" %s') % (w9xpopen, args)
creationflags |= _subprocess.CREATE_NEW_CONSOLE
super(Popen, self)._execute_child(args, executable,
preexec_fn, close_fds, cwd, env, universal_newlines,
startupinfo, creationflags, False, to_close, p2cread,
p2cwrite, c2pread, c2pwrite, errread, errwrite)
_subprocess.CreateProcess = CreateProcess
##
## Help and usage strings
##
usage_msg = """
%(exname)s {-h|--help}
%(exname)s {-v|--version}
%(exname)s [--debug|-d] [REVLIST]"""
description_msg = """\
Run this command in a git repository to output a formatted changelog
"""
epilog_msg = """\
%(exname)s uses a config file to filter meaningful commit or do some
formatting in commit messages thanks to a config file.
Config file location will be resolved in this order:
- in shell environment variable GITCHANGELOG_CONFIG_FILENAME
- in git configuration: ``git config gitchangelog.rc-path``
- as '.%(exname)s.rc' in the root of the current git repository
"""
##
## Shell command helper functions
##
def stderr(msg):
print(msg, file=sys.stderr)
def err(msg):
stderr("Error: " + msg)
def warn(msg):
stderr("Warning: " + msg)
def die(msg=None, errlvl=1):
if msg:
stderr(msg)
sys.exit(errlvl)
class ShellError(Exception):
def __init__(self, msg, errlvl=None, command=None, out=None, err=None):
self.errlvl = errlvl
self.command = command
self.out = out
self.err = err
super(ShellError, self).__init__(msg)
@contextlib.contextmanager
def set_cwd(directory):
curdir = os.getcwd()
os.chdir(directory)
try:
yield
finally:
os.chdir(curdir)
def format_last_exception(prefix=" | "):
    """Format the last exception for display in tests.
    This allows raising a custom exception without losing the context of
    what caused the problem in the first place:
>>> def f():
... raise Exception("Something terrible happened")
>>> try: ## doctest: +ELLIPSIS
... f()
... except Exception:
... formated_exception = format_last_exception()
    ...     raise ValueError('Oops, an error occurred:\\n%s'
... % formated_exception)
Traceback (most recent call last):
...
    ValueError: Oops, an error occurred:
| Traceback (most recent call last):
...
| Exception: Something terrible happened
"""
return '\n'.join(
str(prefix + line)
for line in traceback.format_exc().strip().split('\n'))
##
## config file functions
##
_config_env = {
'WIN32': WIN32,
'PY3': PY3,
}
def available_in_config(f):
_config_env[f.__name__] = f
return f
def load_config_file(filename, default_filename=None,
fail_if_not_present=True):
"""Loads data from a config file."""
config = _config_env.copy()
for fname in [default_filename, filename]:
if fname and os.path.exists(fname):
if not os.path.isfile(fname):
                die("config file path '%s' exists but is not a file!"
% (fname, ))
content = file_get_contents(fname)
try:
code = compile(content, fname, 'exec')
exec(code, config) ## pylint: disable=exec-used
except SyntaxError as e:
die('Syntax error in config file: %s\n%s'
'File %s, line %i'
% (str(e),
(indent(e.text.rstrip(), " | ") + "\n") if e.text else "",
e.filename, e.lineno))
else:
if fail_if_not_present:
die('%s config file is not found and is required.' % (fname, ))
return config
##
## Text functions
##
@available_in_config
class TextProc(object):
def __init__(self, fun):
self.fun = fun
if hasattr(fun, "__name__"):
self.__name__ = fun.__name__
def __call__(self, text):
return self.fun(text)
def __or__(self, value):
if isinstance(value, TextProc):
return TextProc(lambda text: value.fun(self.fun(text)))
import inspect
(_frame, filename, lineno, _function_name, lines, _index) = \
inspect.stack()[1]
raise SyntaxError("Invalid syntax in config file",
(filename, lineno, 0,
"Invalid chain with a non TextProc element %r:\n%s"
% (value, indent("".join(lines).strip(), " | "))))
def set_if_empty(text, msg="No commit message."):
if len(text):
return text
return msg
@TextProc
def ucfirst(msg):
if len(msg) == 0:
return msg
return msg[0].upper() + msg[1:]
@TextProc
def final_dot(msg):
if len(msg) and msg[-1].isalnum():
return msg + "."
return msg
def indent(text, chars=" ", first=None):
"""Return text string indented with the given chars
>>> string = 'This is first line.\\nThis is second line\\n'
>>> print(indent(string, chars="| ")) # doctest: +NORMALIZE_WHITESPACE
| This is first line.
| This is second line
|
>>> print(indent(string, first="- ")) # doctest: +NORMALIZE_WHITESPACE
- This is first line.
This is second line
>>> string = 'This is first line.\\n\\nThis is second line'
>>> print(indent(string, first="- ")) # doctest: +NORMALIZE_WHITESPACE
- This is first line.
<BLANKLINE>
This is second line
"""
if first:
first_line = text.split("\n")[0]
rest = '\n'.join(text.split("\n")[1:])
return '\n'.join([(first + first_line).rstrip(),
indent(rest, chars=chars)])
return '\n'.join([(chars + line).rstrip()
for line in text.split('\n')])
def paragraph_wrap(text, regexp="\n\n"):
    r"""Wrap text by making sure that paragraphs are separated correctly
>>> string = 'This is first paragraph which is quite long don\'t you \
... think ? Well, I think so.\n\nThis is second paragraph\n'
>>> print(paragraph_wrap(string)) # doctest: +NORMALIZE_WHITESPACE
This is first paragraph which is quite long don't you think ? Well, I
think so.
This is second paragraph
    Notice that each paragraph has been wrapped separately.
"""
regexp = re.compile(regexp, re.MULTILINE)
return "\n".join("\n".join(textwrap.wrap(paragraph.strip()))
for paragraph in regexp.split(text)).strip()
def curryfy(f):
return lambda *a, **kw: TextProc(lambda txt: f(txt, *a, **kw))
## these are curryfied version of their lower case definition
Indent = curryfy(indent)
Wrap = curryfy(paragraph_wrap)
ReSub = lambda p, r, **k: TextProc(lambda txt: re.sub(p, r, txt, **k))
noop = TextProc(lambda txt: txt)
strip = TextProc(lambda txt: txt.strip())
SetIfEmpty = curryfy(set_if_empty)
for _label in ("Indent", "Wrap", "ReSub", "noop", "final_dot",
"ucfirst", "strip", "SetIfEmpty"):
_config_env[_label] = locals()[_label]
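## Example (illustrative only, unused by the rest of this module): text
## processors compose with ``|`` into a single callable, which is how
## ``subject_process``/``body_process`` pipelines are built in config files.
def _example_text_pipeline():
    clean_subject = strip | ucfirst | final_dot
    return clean_subject("  fix typo in readme")  ## -> 'Fix typo in readme.'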
##
## File
##
def file_get_contents(filename):
with open(filename) as f:
out = f.read()
if not PY3:
if not isinstance(out, unicode):
out = out.decode(_preferred_encoding)
        ## remove the encoding declaration (for some reason, Python 2.7
        ## doesn't like it).
out = re.sub(r"^(\s*#.*\s*)coding[:=]\s*([-\w.]+\s*;?\s*)",
r"\1", out, re.DOTALL)
return out
def file_put_contents(filename, string):
"""Write string to filename."""
if PY3:
fopen = open(filename, 'w', newline='')
else:
fopen = open(filename, 'wb')
with fopen as f:
f.write(string)
##
## Inferring revision
##
def _file_regex_match(filename, pattern, **kw):
if not os.path.isfile(filename):
raise IOError("Can't open file '%s'." % filename)
file_content = file_get_contents(filename)
match = re.search(pattern, file_content, **kw)
if match is None:
stderr("file content: %r" % file_content)
if isinstance(pattern, type(re.compile(''))):
pattern = pattern.pattern
raise ValueError(
"Regex %s did not match any substring in '%s'."
% (pattern, filename))
return match
@available_in_config
def FileFirstRegexMatch(filename, pattern):
def _call():
match = _file_regex_match(filename, pattern)
dct = match.groupdict()
if dct:
if "rev" not in dct:
                warn("Named pattern used, but none is named 'rev'. "
                     "Using full match.")
return match.group(0)
if dct['rev'] is None:
die("Named pattern used, but it was not valued.")
return dct['rev']
return match.group(0)
return _call
@available_in_config
def Caret(l):
def _call():
return "^%s" % eval_if_callable(l)
return _call
##
## System functions
##
## Note that locale.getpreferredencoding() does NOT follow
## PYTHONIOENCODING by default, but ``sys.stdout.encoding`` does. In
## PY2, ``sys.stdout.encoding`` is not set in subshells unless
## PYTHONIOENCODING is set. However, if _preferred_encoding
## is not set to utf-8, it leads to encoding errors.
_preferred_encoding = os.environ.get("PYTHONIOENCODING") or \
locale.getpreferredencoding()
DEFAULT_GIT_LOG_ENCODING = 'utf-8'
class Phile(object):
    """File-like API to read fields separated by arbitrary delimiters
    It takes care of decoding the file content to unicode.
    This is an adaptor on a file object.
>>> if PY3:
... from io import BytesIO
... def File(s):
... _obj = BytesIO()
... _obj.write(s.encode(_preferred_encoding))
... _obj.seek(0)
... return _obj
... else:
... from cStringIO import StringIO as File
>>> f = Phile(File("a-b-c-d"))
Read provides an iterator:
>>> def show(l):
... print(", ".join(l))
>>> show(f.read(delimiter="-"))
a, b, c, d
    You can change the buffer size loaded into memory before output.
    It should not change the iterator output:
>>> f = Phile(File("é-à-ü-d"), buffersize=3)
>>> len(list(f.read(delimiter="-")))
4
>>> f = Phile(File("foo-bang-yummy"), buffersize=3)
>>> show(f.read(delimiter="-"))
foo, bang, yummy
>>> f = Phile(File("foo-bang-yummy"), buffersize=1)
>>> show(f.read(delimiter="-"))
foo, bang, yummy
"""
def __init__(self, filename, buffersize=4096, encoding=_preferred_encoding):
self._file = filename
self._buffersize = buffersize
self._encoding = encoding
def read(self, delimiter="\n"):
buf = ""
if PY3:
delimiter = delimiter.encode(_preferred_encoding)
buf = buf.encode(_preferred_encoding)
while True:
chunk = self._file.read(self._buffersize)
if not chunk:
yield buf.decode(self._encoding)
return
records = chunk.split(delimiter)
records[0] = buf + records[0]
for record in records[:-1]:
yield record.decode(self._encoding)
buf = records[-1]
def write(self, buf):
if PY3:
buf = buf.encode(self._encoding)
return self._file.write(buf)
def close(self):
return self._file.close()
class Proc(Popen):
def __init__(self, command, env=None, encoding=_preferred_encoding):
super(Proc, self).__init__(
command, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=PLT_CFG['close_fds'], env=env,
universal_newlines=False)
self.stdin = Phile(self.stdin, encoding=encoding)
self.stdout = Phile(self.stdout, encoding=encoding)
self.stderr = Phile(self.stderr, encoding=encoding)
def cmd(command, env=None, shell=True):
p = Popen(command, shell=shell,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=PLT_CFG['close_fds'], env=env,
universal_newlines=False)
out, err = p.communicate()
return (
out.decode(getattr(sys.stdout, "encoding", None) or
_preferred_encoding),
err.decode(getattr(sys.stderr, "encoding", None) or
_preferred_encoding),
p.returncode)
@available_in_config
def wrap(command, ignore_errlvls=[0], env=None, shell=True):
    """Wraps a shell command and raises an exception on unexpected errlvl
>>> wrap('/tmp/lsdjflkjf') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ShellError: Wrapped command '/tmp/lsdjflkjf' exited with errorlevel 127.
stderr:
| /bin/sh: .../tmp/lsdjflkjf: not found
>>> print(wrap('echo hello'), end='')
hello
>>> print(wrap('echo hello && false'),
... end='') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ShellError: Wrapped command 'echo hello && false' exited with errorlevel 1.
stdout:
| hello
"""
out, err, errlvl = cmd(command, env=env, shell=shell)
if errlvl not in ignore_errlvls:
formatted = []
if out:
if out.endswith('\n'):
out = out[:-1]
formatted.append("stdout:\n%s" % indent(out, "| "))
if err:
if err.endswith('\n'):
err = err[:-1]
formatted.append("stderr:\n%s" % indent(err, "| "))
msg = '\n'.join(formatted)
raise ShellError("Wrapped command %r exited with errorlevel %d.\n%s"
% (command, errlvl, indent(msg, chars=" ")),
errlvl=errlvl, command=command, out=out, err=err)
return out
@available_in_config
def swrap(command, **kwargs):
"""Same as ``wrap(...)`` but strips the output."""
return wrap(command, **kwargs).strip()
##
## git information access
##
class SubGitObjectMixin(object):
def __init__(self, repos):
self._repos = repos
@property
def git(self):
"""Simple delegation to ``repos`` original method."""
return self._repos.git
GIT_FORMAT_KEYS = {
'sha1': "%H",
'sha1_short': "%h",
'subject': "%s",
'author_name': "%an",
'author_email': "%ae",
'author_date': "%ad",
'author_date_timestamp': "%at",
'committer_name': "%cn",
'committer_date_timestamp': "%ct",
'raw_body': "%B",
'body': "%b",
}
GIT_FULL_FORMAT_STRING = "%x00".join(GIT_FORMAT_KEYS.values())
REGEX_RFC822_KEY_VALUE = \
r'(^|\n)(?P<key>[A-Z]\w+(-\w+)*): (?P<value>[^\n]*(\n\s+[^\n]*)*)'
REGEX_RFC822_POSTFIX = \
r'(%s)+$' % REGEX_RFC822_KEY_VALUE
class GitCommit(SubGitObjectMixin):
    r"""Represent a Git commit and expose much information through its attributes
Let's create a fake GitRepos:
>>> from minimock import Mock
>>> repos = Mock("gitRepos")
Initialization:
>>> repos.git = Mock("gitRepos.git")
>>> repos.git.log.mock_returns_func = \
... lambda *a, **kwargs: "\x00".join([{
... 'sha1': "000000",
... 'sha1_short': "000",
... 'subject': SUBJECT,
... 'author_name': "John Smith",
... 'author_date': "Tue Feb 14 20:31:22 2017 +0700",
... 'author_email': "john.smith@example.com",
... 'author_date_timestamp': "0", ## epoch
... 'committer_name': "Alice Wang",
... 'committer_date_timestamp': "0", ## epoch
... 'raw_body': "my subject\n\n%s" % BODY,
... 'body': BODY,
... }[key] for key in GIT_FORMAT_KEYS.keys()])
>>> repos.git.rev_list.mock_returns = "123456"
Query, by attributes or items:
>>> SUBJECT = "fee fie foh"
>>> BODY = "foo foo foo"
>>> head = GitCommit(repos, "HEAD")
>>> head.subject
Called gitRepos.git.log(...'HEAD'...)
'fee fie foh'
>>> head.author_name
'John Smith'
    Notice that on the second call, there's no need to call git log again,
    as all the values have already been computed.
Trailer
=======
    ``GitCommit`` offers a simple direct API to trailer values. These
    are like RFC822 header values but appear at the end of the body:
>>> BODY = '''\
... Stuff in the body
... Change-id: 1234
... Value-X: Supports multi
... line values'''
>>> head = GitCommit(repos, "HEAD")
>>> head.trailer_change_id
Called gitRepos.git.log(...'HEAD'...)
'1234'
>>> head.trailer_value_x
'Supports multi\nline values'
Notice how the multi-line value was unindented.
    In case of multiple values, these are concatenated into lists:
>>> BODY = '''\
... Stuff in the body
... Co-Authored-By: Bob
... Co-Authored-By: Alice
... Co-Authored-By: Jack
... '''
>>> head = GitCommit(repos, "HEAD")
>>> head.trailer_co_authored_by
Called gitRepos.git.log(...'HEAD'...)
['Bob', 'Alice', 'Jack']
Special values
==============
Authors
-------
>>> BODY = '''\
... Stuff in the body
... Co-Authored-By: Bob
... Co-Authored-By: Alice
... Co-Authored-By: Jack
... '''
>>> head = GitCommit(repos, "HEAD")
>>> head.author_names
Called gitRepos.git.log(...'HEAD'...)
['Alice', 'Bob', 'Jack', 'John Smith']
Notice that they are printed in alphabetical order.
"""
def __init__(self, repos, identifier):
super(GitCommit, self).__init__(repos)
self.identifier = identifier
self._trailer_parsed = False
def __getattr__(self, label):
"""Completes commits attributes upon request."""
attrs = GIT_FORMAT_KEYS.keys()
if label not in attrs:
try:
return self.__dict__[label]
except KeyError:
if self._trailer_parsed:
raise AttributeError(label)
identifier = self.identifier
## Compute only missing information
missing_attrs = [l for l in attrs if l not in self.__dict__]
        ## some commits can already be fully specified (see ``mk_commit``)
if missing_attrs:
aformat = "%x00".join(GIT_FORMAT_KEYS[l]
for l in missing_attrs)
try:
ret = self.git.log([identifier, "--max-count=1",
"--pretty=format:%s" % aformat, "--"])
except ShellError:
if DEBUG:
raise
                raise ValueError("Given commit identifier %r doesn't exist"
% self.identifier)
attr_values = ret.split("\x00")
for attr, value in zip(missing_attrs, attr_values):
setattr(self, attr, value.strip())
## Let's interpret RFC822-like header keys that could be in the body
match = re.search(REGEX_RFC822_POSTFIX, self.body)
if match is not None:
pos = match.start()
postfix = self.body[pos:]
self.body = self.body[:pos]
for match in re.finditer(REGEX_RFC822_KEY_VALUE, postfix):
dct = match.groupdict()
key = dct["key"].replace("-", "_").lower()
if "\n" in dct["value"]:
first_line, remaining = dct["value"].split('\n', 1)
value = "%s\n%s" % (first_line,
textwrap.dedent(remaining))
else:
value = dct["value"]
try:
prev_value = self.__dict__["trailer_%s" % key]
except KeyError:
setattr(self, "trailer_%s" % key, value)
else:
setattr(self, "trailer_%s" % key,
prev_value + [value, ]
if isinstance(prev_value, list)
else [prev_value, value, ])
self._trailer_parsed = True
return getattr(self, label)
@property
def author_names(self):
return [re.sub(r'^([^<]+)<[^>]+>\s*$', r'\1', author).strip()
for author in self.authors]
@property
def authors(self):
co_authors = getattr(self, 'trailer_co_authored_by', [])
co_authors = co_authors if isinstance(co_authors, list) \
else [co_authors]
return sorted(co_authors +
["%s <%s>" % (self.author_name, self.author_email)])
@property
def date(self):
d = datetime.datetime.utcfromtimestamp(
float(self.author_date_timestamp))
return d.strftime('%Y-%m-%d')
@property
def has_annotated_tag(self):
try:
self.git.rev_parse(['%s^{tag}' % self.identifier, "--"])
return True
except ShellError as e:
if e.errlvl != 128:
raise
return False
@property
def tagger_date_timestamp(self):
if not self.has_annotated_tag:
raise ValueError("Can't access 'tagger_date_timestamp' on commit without annotated tag.")
tagger_date_utc = self.git.for_each_ref(
'refs/tags/%s' % self.identifier, format='%(taggerdate:raw)')
return tagger_date_utc.split(" ", 1)[0]
@property
def tagger_date(self):
d = datetime.datetime.utcfromtimestamp(
float(self.tagger_date_timestamp))
return d.strftime('%Y-%m-%d')
def __le__(self, value):
if not isinstance(value, GitCommit):
value = self._repos.commit(value)
try:
self.git.merge_base(value.sha1, is_ancestor=self.sha1)
return True
except ShellError as e:
if e.errlvl != 1:
raise
return False
def __lt__(self, value):
if not isinstance(value, GitCommit):
value = self._repos.commit(value)
return self <= value and self != value
def __eq__(self, value):
if not isinstance(value, GitCommit):
value = self._repos.commit(value)
return self.sha1 == value.sha1
def __hash__(self):
return hash(self.sha1)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.identifier)
def normpath(path, cwd=None):
    """Path can be absolute or relative; if relative, it is resolved
    against the given cwd.
    """
if os.path.isabs(path):
return path
cwd = cwd if cwd else os.getcwd()
return os.path.normpath(os.path.join(cwd, path))
class GitConfig(SubGitObjectMixin):
"""Interface to config values of git
Let's create a fake GitRepos:
>>> from minimock import Mock
>>> repos = Mock("gitRepos")
Initialization:
>>> cfg = GitConfig(repos)
Query, by attributes or items:
>>> repos.git.config.mock_returns = "bar"
>>> cfg.foo
Called gitRepos.git.config('foo')
'bar'
>>> cfg["foo"]
Called gitRepos.git.config('foo')
'bar'
>>> cfg.get("foo")
Called gitRepos.git.config('foo')
'bar'
>>> cfg["foo.wiz"]
Called gitRepos.git.config('foo.wiz')
'bar'
    Notice that you can't use attribute access for subsections, as in ``cfg.foo.wiz``.
That's because in git config files, you can have a value attached to
an element, and this element can also be a section.
Nevertheless, you can do:
>>> getattr(cfg, "foo.wiz")
Called gitRepos.git.config('foo.wiz')
'bar'
Default values
--------------
get item, and getattr default values can be used:
>>> del repos.git.config.mock_returns
>>> repos.git.config.mock_raises = ShellError('Key not found',
... errlvl=1, out="", err="")
>>> getattr(cfg, "foo", "default")
Called gitRepos.git.config('foo')
'default'
>>> cfg["foo"] ## doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'foo'
>>> getattr(cfg, "foo") ## doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError...
>>> cfg.get("foo", "default")
Called gitRepos.git.config('foo')
'default'
>>> print("%r" % cfg.get("foo"))
Called gitRepos.git.config('foo')
None
"""
def __init__(self, repos):
super(GitConfig, self).__init__(repos)
def __getattr__(self, label):
try:
res = self.git.config(label)
except ShellError as e:
if e.errlvl == 1 and e.out == "":
raise AttributeError("key %r is not found in git config."
% label)
raise
return res
def get(self, label, default=None):
return getattr(self, label, default)
def __getitem__(self, label):
try:
return getattr(self, label)
except AttributeError:
raise KeyError(label)
class GitCmd(SubGitObjectMixin):
def __getattr__(self, label):
label = label.replace("_", "-")
def dir_swrap(command, **kwargs):
with set_cwd(self._repos._orig_path):
return swrap(command, **kwargs)
def method(*args, **kwargs):
if (len(args) == 1 and not isinstance(args[0], basestring)):
return dir_swrap(
['git', label, ] + args[0],
shell=False,
env=kwargs.get("env", None))
cli_args = []
for key, value in kwargs.items():
cli_key = (("-%s" if len(key) == 1 else "--%s")
% key.replace("_", "-"))
if isinstance(value, bool):
cli_args.append(cli_key)
else:
cli_args.append(cli_key)
cli_args.append(value)
cli_args.extend(args)
return dir_swrap(['git', label, ] + cli_args, shell=False)
return method
class GitRepos(object):
def __init__(self, path):
## Saving this original path to ensure all future git commands
## will be done from this location.
self._orig_path = os.path.abspath(path)
## verify ``git`` command is accessible:
try:
self._git_version = self.git.version()
except ShellError:
if DEBUG:
raise
raise EnvironmentError(
"Required ``git`` command not found or broken in $PATH. "
"(calling ``git version`` failed.)")
## verify that we are in a git repository
try:
self.git.remote()
except ShellError:
if DEBUG:
raise
raise EnvironmentError(
"Not in a git repository. (calling ``git remote`` failed.)")
self.bare = self.git.rev_parse(is_bare_repository=True) == "true"
self.toplevel = (None if self.bare else
self.git.rev_parse(show_toplevel=True))
self.gitdir = normpath(self.git.rev_parse(git_dir=True),
cwd=self._orig_path)
@classmethod
def create(cls, directory, *args, **kwargs):
os.mkdir(directory)
return cls.init(directory, *args, **kwargs)
@classmethod
def init(cls, directory, user=None, email=None):
with set_cwd(directory):
wrap("git init .")
self = cls(directory)
if user:
self.git.config("user.name", user)
if email:
self.git.config("user.email", email)
return self
def commit(self, identifier):
return GitCommit(self, identifier)
@property
def git(self):
return GitCmd(self)
@property
def config(self):
return GitConfig(self)
    def tags(self, contains=None):
        """List of ``GitCommit`` objects for the repository's tags
        Current tag order is the committer date timestamp of the tagged commit.
        No firm reason for that, and it could change in a future version.
"""
if contains:
tags = self.git.tag(contains=contains).split("\n")
else:
tags = self.git.tag().split("\n")
        ## Should we use the new version-name sorting? Referring to:
        ## ``git tag --sort -v:refname`` in git version >2.0.
## Sorting and reversing with command line is not available on
## git version <2.0
return sorted([self.commit(tag) for tag in tags if tag != ''],
key=lambda x: int(x.committer_date_timestamp))
def log(self, includes=["HEAD", ], excludes=[], include_merge=True,
encoding=_preferred_encoding, path=None):
"""Reverse chronological list of git repository's commits
Note: rev lists can be GitCommit instance list or identifier list.
"""
refs = {'includes': includes,
'excludes': excludes}
for ref_type in ('includes', 'excludes'):
for idx, ref in enumerate(refs[ref_type]):
if not isinstance(ref, GitCommit):
refs[ref_type][idx] = self.commit(ref)
## --topo-order: don't mix commits from separate branches.
plog = Proc("git log --stdin -z --topo-order --pretty=format:%s %s %s"
% (GIT_FULL_FORMAT_STRING,
'--no-merges' if not include_merge else '',
'--' if not path else '-- ' + path),
encoding=encoding)
for ref in refs["includes"]:
plog.stdin.write("%s\n" % ref.sha1)
for ref in refs["excludes"]:
plog.stdin.write("^%s\n" % ref.sha1)
plog.stdin.close()
def mk_commit(dct):
"""Creates an already set commit from a dct"""
c = self.commit(dct["sha1"])
for k, v in dct.items():
setattr(c, k, v)
return c
values = plog.stdout.read("\x00")
try:
            while True: ## next(values) will eventually raise a StopIteration
yield mk_commit(dict([(key, next(values))
for key in GIT_FORMAT_KEYS]))
except StopIteration:
            pass ## since 3.7, we are no longer allowed to let
                 ## StopIteration trickle out.
finally:
plog.stdout.close()
plog.stderr.close()
def first_matching(section_regexps, string):
for section, regexps in section_regexps:
if regexps is None:
return section
for regexp in regexps:
if re.search(regexp, string) is not None:
return section
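## Example (illustrative only): with gitchangelog-style section regexps, the
## first section whose regexps match the subject wins, and a ``None`` regexps
## entry acts as a catch-all.
def _example_first_matching():
    section_regexps = [('New', [r'^[nN]ew\s']),
                       ('Fix', [r'^[fF]ix\s']),
                       ('Other', None)]
    return first_matching(section_regexps, "fix typo in docs")  ## -> 'Fix'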
def ensure_template_file_exists(label, template_name):
"""Return template file path given a label hint and the template name
    Template name can be a filename with a full path;
    in that case, the label is of no use.
    If ``template_name`` does not refer to an existing file,
    then ``label`` is used to find a template file among
    the bundled ones.
"""
try:
template_path = GitRepos(os.getcwd()).config.get(
"gitchangelog.template-path")
except ShellError as e:
stderr(
"Error parsing git config: %s."
" Won't be able to read 'template-path' if defined."
% (str(e)))
template_path = None
if template_path:
path_file = path_label = template_path
else:
path_file = os.getcwd()
path_label = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"templates", label)
for ftn in [os.path.join(path_file, template_name),
os.path.join(path_label, "%s.tpl" % template_name)]:
if os.path.isfile(ftn):
return ftn
templates = glob.glob(os.path.join(path_label, "*.tpl"))
if len(templates) > 0:
msg = ("These are the available %s templates:" % label)
msg += "\n - " + \
"\n - ".join(os.path.basename(f).split(".")[0]
for f in templates)
msg += "\nTemplates are located in %r" % path_label
else:
msg = "No available %s templates found in %r." \
% (label, path_label)
die("Error: Invalid %s template name %r.\n" % (label, template_name) +
"%s" % msg)
##
## Output Engines
##
@available_in_config
def rest_py(data, opts={}):
"""Returns ReStructured Text changelog content from data"""
def rest_title(label, char="="):
return (label.strip() + "\n") + (char * len(label) + "\n")
def render_version(version):
title = "%s (%s)" % (version["tag"], version["date"]) \
if version["tag"] else \
opts["unreleased_version_label"]
s = rest_title(title, char="-")
sections = version["sections"]
nb_sections = len(sections)
for section in sections:
section_label = section["label"] if section.get("label", None) \
else "Other"
if not (section_label == "Other" and nb_sections == 1):
s += "\n" + rest_title(section_label, "~")
for commit in section["commits"]:
s += render_commit(commit)
return s
def render_commit(commit, opts=opts):
subject = commit["subject"]
subject += " [%s]" % (", ".join(commit["authors"]), )
entry = indent('\n'.join(textwrap.wrap(subject)),
first="- ").strip() + "\n"
if commit["body"]:
entry += "\n" + indent(commit["body"])
entry += "\n"
return entry
if data["title"]:
yield rest_title(data["title"], char="=") + "\n\n"
for version in data["versions"]:
if len(version["sections"]) > 0:
yield render_version(version) + "\n\n"
## formatter engines
if pystache:
@available_in_config
def mustache(template_name):
"""Return a callable that will render a changelog data structure
        The returned callable must take 2 arguments: ``data`` and ``opts``.
"""
template_path = ensure_template_file_exists("mustache", template_name)
template = file_get_contents(template_path)
def stuffed_versions(versions, opts):
for version in versions:
title = "%s (%s)" % (version["tag"], version["date"]) \
if version["tag"] else \
opts["unreleased_version_label"]
version["label"] = title
version["label_chars"] = list(version["label"])
for section in version["sections"]:
section["label_chars"] = list(section["label"])
section["display_label"] = \
not (section["label"] == "Other" and
len(version["sections"]) == 1)
for commit in section["commits"]:
commit["author_names_joined"] = ", ".join(
commit["authors"])
commit["body_indented"] = indent(commit["body"])
yield version
def renderer(data, opts):
## mustache is very simple so we need to add some intermediate
## values
data["general_title"] = True if data["title"] else False
data["title_chars"] = list(data["title"]) if data["title"] else []
data["versions"] = stuffed_versions(data["versions"], opts)
return pystache.render(template, data)
return renderer
else:
@available_in_config
def mustache(template_name): ## pylint: disable=unused-argument
die("Required 'pystache' python module not found.")
if mako:
import mako.template ## pylint: disable=wrong-import-position
mako_env = dict((f.__name__, f) for f in (ucfirst, indent, textwrap,
paragraph_wrap))
@available_in_config
def makotemplate(template_name):
"""Return a callable that will render a changelog data structure
        The returned callable must take 2 arguments: ``data`` and ``opts``.
"""
template_path = ensure_template_file_exists("mako", template_name)
template = mako.template.Template(filename=template_path)
def renderer(data, opts):
kwargs = mako_env.copy()
kwargs.update({"data": data,
"opts": opts})
return template.render(**kwargs)
return renderer
else:
@available_in_config
def makotemplate(template_name): ## pylint: disable=unused-argument
die("Required 'mako' python module not found.")
##
## Publish action
##
@available_in_config
def stdout(content):
for chunk in content:
safe_print(chunk)
@available_in_config
def FileInsertAtFirstRegexMatch(filename, pattern, flags=0,
idx=lambda m: m.start()):
def write_content(f, content):
for content_line in content:
f.write(content_line)
def _wrapped(content):
index = idx(_file_regex_match(filename, pattern, flags=flags))
offset = 0
new_offset = 0
postfix = False
with open(filename + "~", "w") as dst:
with open(filename, "r") as src:
for line in src:
if postfix:
dst.write(line)
continue
new_offset = offset + len(line)
if new_offset < index:
offset = new_offset
dst.write(line)
continue
dst.write(line[0:index - offset])
write_content(dst, content)
dst.write(line[index - offset:])
postfix = True
if not postfix:
write_content(dst, content)
if WIN32:
os.remove(filename)
os.rename(filename + "~", filename)
return _wrapped
@available_in_config
def FileRegexSubst(filename, pattern, replace, flags=0):
replace = re.sub(r'\\([0-9+])', r'\\g<\1>', replace)
def _wrapped(content):
src = file_get_contents(filename)
## Protect replacement pattern against the following expansion of '\o'
src = re.sub(
pattern,
replace.replace(r'\o', "".join(content).replace('\\', '\\\\')),
src, flags=flags)
if not PY3:
src = src.encode(_preferred_encoding)
file_put_contents(filename, src)
return _wrapped
##
## Data Structure
##
def versions_data_iter(repository, revlist=None,
ignore_regexps=[],
section_regexps=[(None, '')],
tag_filter_regexp=r"\d+\.\d+(\.\d+)?",
include_merge=True,
body_process=lambda x: x,
subject_process=lambda x: x,
log_encoding=DEFAULT_GIT_LOG_ENCODING,
warn=warn, ## Mostly used for test
path=None,
):
"""Returns an iterator through versions data structures
(see ``gitchangelog.rc.reference`` file for more info)
:param repository: target ``GitRepos`` object
:param revlist: list of strings that git log understands as revlist
:param ignore_regexps: list of regexp identifying ignored commit messages
:param section_regexps: regexps identifying sections
:param tag_filter_regexp: regexp to match tags used as version
:param include_merge: whether to include merge commits in the log or not
:param body_process: text processing object to apply to body
:param subject_process: text processing object to apply to subject
:param log_encoding: the encoding used in git logs
:param warn: callable to output warnings, mocked by tests
:param path: limit commits to those occurring under this path
:returns: iterator of versions data_structures
"""
revlist = revlist or []
## Hash to speedup lookups
versions_done = {}
excludes = [rev[1:]
                for rev in repository.git.rev_parse([
                    "--revs-only", ] + revlist + ["--", ]).split("\n")
if rev.startswith("^")] if revlist else []
revs = repository.git.rev_list(*revlist).split("\n") if revlist else []
revs = [rev for rev in revs if rev != ""]
if revlist and not revs:
die("No commits matching given revlist: %s" % (" ".join(revlist), ))
tags = [tag
for tag in repository.tags(contains=revs[-1] if revs else None)
if re.match(tag_filter_regexp, tag.identifier)]
tags.append(repository.commit("HEAD"))
if revlist:
max_rev = repository.commit(revs[0])
new_tags = []
for tag in tags:
new_tags.append(tag)
if max_rev <= tag:
break
tags = new_tags
else:
max_rev = tags[-1]
section_order = [k for k, _v in section_regexps]
tags = list(reversed(tags))
## Get the changes between tags (releases)
for idx, tag in enumerate(tags):
## New version
current_version = {
"date": tag.tagger_date if tag.has_annotated_tag else tag.date,
"commit_date": tag.date,
"tagger_date": tag.tagger_date if tag.has_annotated_tag else None,
"tag": tag.identifier if tag.identifier != "HEAD" else None,
"commit": tag,
}
sections = collections.defaultdict(list)
commits = repository.log(
includes=[min(tag, max_rev)],
excludes=tags[idx + 1:] + excludes,
include_merge=include_merge,
encoding=log_encoding,
path=path)
for commit in commits:
if any(re.search(pattern, commit.subject) is not None
for pattern in ignore_regexps):
continue
matched_section = first_matching(section_regexps, commit.subject)
## Finally storing the commit in the matching section
sections[matched_section].append({
"author": commit.author_name,
"authors": commit.author_names,
"subject": subject_process(commit.subject),
"body": body_process(commit.body),
"commit": commit,
})
## Flush current version
current_version["sections"] = [{"label": k, "commits": sections[k]}
for k in section_order
if k in sections]
if len(current_version["sections"]) != 0:
yield current_version
versions_done[tag] = current_version
def changelog(output_engine=rest_py,
unreleased_version_label="unreleased",
warn=warn, ## Mostly used for test
**kwargs):
"""Returns a string containing the changelog of given repository
This function returns a string corresponding to the template rendered with
the changelog data tree.
(see ``gitchangelog.rc.sample`` file for more info)
For an exact list of arguments, see the arguments of
``versions_data_iter(..)``.
:param unreleased_version_label: version label for untagged commits
:param output_engine: callable to render the changelog data
:param warn: callable to output warnings, mocked by tests
:returns: content of changelog
"""
opts = {
'unreleased_version_label': unreleased_version_label,
}
## Setting main container of changelog elements
title = None if kwargs.get("revlist") else "Changelog"
data = {"title": title,
"versions": []}
versions = versions_data_iter(warn=warn, **kwargs)
## poke once in versions to know if there's at least one:
try:
first_version = next(versions)
except StopIteration:
warn("Empty changelog. No commits were elected to be used as entry.")
data["versions"] = []
else:
data["versions"] = itertools.chain([first_version], versions)
return output_engine(data=data, opts=opts)
##
## Manage obsolete options
##
_obsolete_options_managers = []
def obsolete_option_manager(fun):
_obsolete_options_managers.append(fun)
@obsolete_option_manager
def obsolete_replace_regexps(config):
    """This option was superseded by the ``subject_process`` option.
    Each regex replacement you had can be translated into a
    ``ReSub(pattern, replace)`` in the ``subject_process`` pipeline.
"""
if "replace_regexps" in config:
for pattern, replace in config["replace_regexps"].items():
config["subject_process"] = \
ReSub(pattern, replace) | \
config.get("subject_process", ucfirst | final_dot)
@obsolete_option_manager
def obsolete_body_split_regexp(config):
    """This option was superseded by the ``body_process`` option.
The split regex can now be sent as a ``Wrap(regex)`` text process
instruction in the ``body_process`` pipeline.
"""
if "body_split_regex" in config:
config["body_process"] = Wrap(config["body_split_regex"]) | \
config.get("body_process", noop)
def manage_obsolete_options(config):
for man in _obsolete_options_managers:
man(config)
##
## Command line parsing
##
def parse_cmd_line(usage, description, epilog, exname, version):
import argparse
kwargs = dict(usage=usage,
description=description,
epilog="\n" + epilog,
prog=exname,
formatter_class=argparse.RawTextHelpFormatter)
try:
parser = argparse.ArgumentParser(version=version, **kwargs)
except TypeError: ## compat with argparse from python 3.4
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument('-v', '--version',
help="show program's version number and exit",
action="version", version=version)
parser.add_argument('-d', '--debug',
help="Enable debug mode (show full tracebacks).",
action="store_true", dest="debug")
parser.add_argument('revlist', nargs='*', action="store", default=[])
    ## Remove "show" as first argument for compatibility reasons.
argv = []
for i, arg in enumerate(sys.argv[1:]):
if arg.startswith("-"):
argv.append(arg)
continue
if arg == "show":
warn("'show' positional argument is deprecated.")
argv += sys.argv[i + 2:]
break
else:
argv += sys.argv[i + 1:]
break
return parser.parse_args(argv)
eval_if_callable = lambda v: v() if callable(v) else v
def get_revision(repository, config, opts):
if opts.revlist:
revs = opts.revlist
else:
revs = config.get("revs")
if revs:
revs = eval_if_callable(revs)
if not isinstance(revs, list):
die("Invalid type for 'revs' in config file. "
"A 'list' type is required, and a %r was given."
% type(revs).__name__)
revs = [eval_if_callable(rev)
for rev in revs]
else:
revs = []
for rev in revs:
if not isinstance(rev, basestring):
die("Invalid type for revision in revs list from config file. "
"'str' type is required, and a %r was given."
% type(rev).__name__)
try:
            repository.git.rev_parse([rev, "--revs-only", "--"])
except ShellError:
if DEBUG:
raise
die("Revision %r is not valid." % rev)
if revs == ["HEAD", ]:
return []
return revs
def get_log_encoding(repository, config):
log_encoding = config.get("log_encoding", None)
if log_encoding is None:
try:
            log_encoding = repository.config.get("i18n.logOutputEncoding")
except ShellError as e:
            warn(
                "Error parsing git config: %s."
                " Couldn't check if 'i18n.logOutputEncoding' was set."
% (str(e)))
## Final defaults coming from git defaults
return log_encoding or DEFAULT_GIT_LOG_ENCODING
##
## Config Manager
##
class Config(dict):
def __getitem__(self, label):
if label not in self.keys():
die("Missing value in config file for key '%s'." % label)
return super(Config, self).__getitem__(label)
##
## Safe print
##
def safe_print(content):
if not PY3:
if isinstance(content, unicode):
content = content.encode(_preferred_encoding)
try:
print(content, end='')
sys.stdout.flush()
except UnicodeEncodeError:
if DEBUG:
raise
## XXXvlab: should use $COLUMNS in bash and for windows:
## http://stackoverflow.com/questions/14978548
stderr(paragraph_wrap(textwrap.dedent("""\
UnicodeEncodeError:
            There was a problem outputting the resulting changelog to
your console.
This probably means that the changelog contains characters
that can't be translated to characters in your current charset
(%s).
""") % sys.stdout.encoding))
if WIN32 and PY_VERSION < 3.6 and sys.stdout.encoding != 'utf-8':
## As of PY 3.6, encoding is now ``utf-8`` regardless of
## PYTHONIOENCODING
## https://www.python.org/dev/peps/pep-0528/
stderr(" You might want to try to fix that by setting "
"PYTHONIOENCODING to 'utf-8'.")
exit(1)
except IOError as e:
if e.errno == 0 and not PY3 and WIN32:
            ## Yes, had a strange IOError Errno 0 after outputting a string
            ## that contained UTF-8 chars on Windows with PY2.7
pass ## Ignoring exception
elif ((WIN32 and e.errno == 22) or ## Invalid argument
(not WIN32 and e.errno == errno.EPIPE)): ## Broken Pipe
            ## Nobody seems to be listening to stdout anymore. Let's bail out.
if PY3:
try:
## Called only to generate exception and have a chance at
## ignoring it. Otherwise this happens upon exit, and gets
## some error message printed on stderr.
sys.stdout.close()
except BrokenPipeError: ## expected outcome on linux
pass
except OSError as e2:
if e2.errno != 22: ## expected outcome on WIN32
raise
                ## Yay! stdout is closed; we can now exit safely.
exit(0)
else:
raise
##
## Main
##
def main():
global DEBUG
## Basic environment infos
reference_config = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"gitchangelog.rc.reference")
basename = os.path.basename(sys.argv[0])
if basename.endswith(".py"):
basename = basename[:-3]
debug_varname = "DEBUG_%s" % basename.upper()
DEBUG = os.environ.get(debug_varname, False)
i = lambda x: x % {'exname': basename}
opts = parse_cmd_line(usage=i(usage_msg),
description=i(description_msg),
epilog=i(epilog_msg),
exname=basename,
version=__version__)
DEBUG = DEBUG or opts.debug
try:
repository = GitRepos(".")
except EnvironmentError as e:
if DEBUG:
raise
try:
die(str(e))
except Exception as e2:
die(repr(e2))
try:
gc_rc = repository.config.get("gitchangelog.rc-path")
except ShellError as e:
stderr(
"Error parsing git config: %s."
" Won't be able to read 'rc-path' if defined."
% (str(e)))
gc_rc = None
gc_rc = normpath(gc_rc, cwd=repository.toplevel) if gc_rc else None
## config file lookup resolution
for enforce_file_existence, fun in [
(True, lambda: os.environ.get('GITCHANGELOG_CONFIG_FILENAME')),
(True, lambda: gc_rc),
(False,
lambda: (os.path.join(repository.toplevel, ".%s.rc" % basename))
if not repository.bare else None)]:
changelogrc = fun()
if changelogrc:
if not os.path.exists(changelogrc):
if enforce_file_existence:
                    die("File %r does not exist." % changelogrc)
else:
                    continue ## changelogrc valued, but file does not exist
else:
break
## config file may lookup for templates relative to the toplevel
## of git repository
os.chdir(repository.toplevel)
config = load_config_file(
os.path.expanduser(changelogrc),
default_filename=reference_config,
fail_if_not_present=False)
config = Config(config)
log_encoding = get_log_encoding(repository, config)
revlist = get_revision(repository, config, opts)
config['unreleased_version_label'] = eval_if_callable(
config['unreleased_version_label'])
manage_obsolete_options(config)
try:
content = changelog(
repository=repository, revlist=revlist,
ignore_regexps=config['ignore_regexps'],
section_regexps=config['section_regexps'],
unreleased_version_label=config['unreleased_version_label'],
tag_filter_regexp=config['tag_filter_regexp'],
output_engine=config.get("output_engine", rest_py),
include_merge=config.get("include_merge", True),
body_process=config.get("body_process", noop),
subject_process=config.get("subject_process", noop),
log_encoding=log_encoding,
path=config.get('path', None)
)
if isinstance(content, basestring):
content = content.splitlines(True)
config.get("publish", stdout)(content)
except KeyboardInterrupt:
if DEBUG:
err("Keyboard interrupt received while running '%s':"
% (basename, ))
stderr(format_last_exception())
else:
err("Keyboard Interrupt. Bailing out.")
exit(130) ## Actual SIGINT as bash process convention.
except Exception as e: ## pylint: disable=broad-except
if DEBUG:
err("Exception while running '%s':"
% (basename, ))
stderr(format_last_exception())
else:
message = "%s" % e
err(message)
stderr(" (set %s environment variable, "
"or use ``--debug`` to see full traceback)" %
(debug_varname, ))
exit(255)
##
## Launch program
##
if __name__ == "__main__":
main()
| 31.067763
| 101
| 0.563501
|
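The GitCmd class above turns arbitrary attribute access into git subcommand calls (git.rev_parse(...) runs ``git rev-parse ...``). A minimal standalone sketch of that dynamic-dispatch pattern, simplified to use subprocess.check_output instead of the module's own swrap and without the keyword-to-flag translation:
import subprocess
class Git(object):
    def __getattr__(self, name):
        subcommand = name.replace("_", "-")
        def run(*args):
            # run `git <subcommand> <args...>` and return its stripped output
            out = subprocess.check_output(["git", subcommand] + list(args))
            return out.decode("utf-8").strip()
        return run
# usage: Git().rev_parse("--show-toplevel")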
a25e5d7428206757618f754a08f661586532e871
| 1,467
|
py
|
Python
|
fight.py
|
JayPrakash916/KBC-CLI-Game
|
500087cfc75b4909b6210474b3925260990aba51
|
[
"MIT"
] | 1
|
2022-03-08T15:11:42.000Z
|
2022-03-08T15:11:42.000Z
|
fight.py
|
jaypatel149/KBC-CLI-Game
|
500087cfc75b4909b6210474b3925260990aba51
|
[
"MIT"
] | null | null | null |
fight.py
|
jaypatel149/KBC-CLI-Game
|
500087cfc75b4909b6210474b3925260990aba51
|
[
"MIT"
] | null | null | null |
# from random import randint
# alive = True
# stamina = 10
# def report(stamina):
# if stamina > 8:
# print ("The alien is strong! It resists your pathetic attack!")
# elif stamina > 5:
# print ("With a loud grunt, the alien stands firm.")
# elif stamina > 3:
# print ("Your attack seems to be having an effect! The alien stumbles!")
# elif stamina > 0:
# print ("The alien is certain to fall soon! It staggers and reels!")
# else:
# print ("That's it! The alien is finished!")
# report(stamina)
# def fight(stamina):
# while stamina > 0:
# response = input("> Enter a move 1.Hit 2.attack 3.fight 4.run>")
# if response in ("hit", "attack"):
# less = randint(0, stamina)
# stamina -= less
# report(stamina)
# elif response == "fight":
# print ("Fight how? You have no weapons, silly space traveler!")
# elif response == "run":
# print ("Sadly, there is nowhere to run.")
# print ("The spaceship is not very big.")
# else:
# print ("The alien zaps you with its powerful ray gun!")
# return True
# return False
# print ("A threatening alien wants to fight you!\n")
# alive = fight(stamina)
# if alive==True:
# print ("\nThe alien lives on, and you, sadly, do not.\n")
# else:
# print ("\nThe alien has been vanquished. Good work!\n")
| 31.891304
| 81
| 0.57396
|
937e08a44a0c1180c61a3e5e90cf21ee74fd10f7
| 170
|
py
|
Python
|
content/example/math_numba_gpu.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
content/example/math_numba_gpu.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | 17
|
2022-03-01T13:12:11.000Z
|
2022-03-31T09:55:45.000Z
|
content/example/math_numba_gpu.py
|
qianglise/HPDA-Python
|
93eefae38569bce35e82586c17ed4a164c3db1c4
|
[
"CC-BY-4.0",
"MIT"
] | 2
|
2022-03-01T16:33:10.000Z
|
2022-03-07T07:48:03.000Z
|
import math
import numba
@numba.vectorize([numba.float64(numba.float64, numba.float64)], target='cuda')
def f_numba_gpu(x,y):
return math.pow(x,3.0) + 4*math.sin(y)
| 24.285714
| 78
| 0.717647
|
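A usage sketch for the CUDA-vectorized ufunc above (assumes a CUDA-capable GPU with a matching toolkit; with target='cuda', NumPy inputs are copied to the device and the result copied back automatically):
import math
import numba
import numpy as np
@numba.vectorize([numba.float64(numba.float64, numba.float64)], target='cuda')
def f_numba_gpu(x, y):
    return math.pow(x, 3.0) + 4 * math.sin(y)
a = np.random.rand(1000000)
b = np.random.rand(1000000)
c = f_numba_gpu(a, b)  # elementwise x**3 + 4*sin(y), evaluated on the GPU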
b47bfbbe9ed3491cb2001cae1d52b114063a6755
| 3,321
|
py
|
Python
|
assignments/A6/A6/agent3.py
|
jajayongjia/366
|
6389723638cef07adbe28a9c7116bc4f71b0e29c
|
[
"MIT"
] | null | null | null |
assignments/A6/A6/agent3.py
|
jajayongjia/366
|
6389723638cef07adbe28a9c7116bc4f71b0e29c
|
[
"MIT"
] | null | null | null |
assignments/A6/A6/agent3.py
|
jajayongjia/366
|
6389723638cef07adbe28a9c7116bc4f71b0e29c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Author: Adam White, Matthew Schlegel, Mohammad M. Ajallooeian, Sina Ghiassian
Purpose: Skeleton code for Monte Carlo Exploring Starts Control Agent
for use on A3 of Reinforcement learning course University of Alberta Fall 2017
"""
from utils import *
import numpy as np
import pickle
#epsilon = 0.1
alpha =0.1
gamma = 0.9
#Q = None
last_action = None
last_state = None
action = None
num_total_states = 1000 # num_total_states: integer
#model = None
action_number = 2
groupTotalNumber = 10
w = np.zeros(groupTotalNumber)
x = np.identity(groupTotalNumber)
groupSize = 100
#x = None
def agent_init():
"""
Hint: Initialize the variables that need to be reset before each run begins
Returns: nothing
"""
#initialize the policy array in a smart way
    global last_state, last_action, w, groupSize, groupTotalNumber, x
    last_state = 500
    last_action = 0
w = np.zeros(groupTotalNumber)
x = np.identity(groupTotalNumber)
def agent_start(state):
"""
    Hint: Initialize the variables that you want to reset before starting a new episode
Arguments: state: numpy array
Returns: action: integer
"""
# pick the first action, don't forget about exploring starts
global last_state,w,groupSize, last_action
    if np.random.binomial(1, 0.5) == 1:
        direction = 1
    else:
        direction = -1
action = np.random.randint(1, groupSize + 1)
action *= direction
last_action = action
last_state = 500 + action
return action
def agent_step(reward, state): # returns action: integer; reward: floating point, state: integer
"""
    Arguments: reward: floating point, state: integer
Returns: action: integer
"""
# select an action, based on Q
global last_state,w,groupSize,last_action,x
groupIndex = (state - 1) // groupSize
lastGroupIndex = (last_state-1)//groupSize
w[lastGroupIndex] += alpha*(reward+np.dot(w, x[groupIndex]) -np.dot(w, x[lastGroupIndex]))
last_state = state
if np.random.binomial(1, 0.5) == 1:
direction = 1
else:
direction = -1
action = np.random.randint(1, groupSize + 1)
action *= direction
last_action = action
return action
def agent_end(reward):
"""
Arguments: reward: floating point
Returns: Nothing
"""
# do learning and update pi
global last_state,w,groupSize, last_action,x
lastGroupIndex = (last_state-1)//groupSize
w[lastGroupIndex] += alpha*(reward+0-np.dot(w, x[lastGroupIndex]))
return
def agent_cleanup():
"""
This function is not used
"""
# clean up
return
def agent_message(in_message): # returns string, in_message: string
global last_state,w,groupSize, last_action,x
"""
Arguments: in_message: string
    returns: The value function as a numpy array.
This function is complete. You do not need to add code here.
"""
# should not need to modify this function. Modify at your own risk
if (in_message == 'ValueFunction'):
valueFunction = np.zeros(1000)
for i in range(10):
for m in range(100):
valueFunction[(i*100)+m] += w[i]
return valueFunction
else:
return "I don't know what to return!!"
| 25.351145
| 107
| 0.657934
|
91f674f0d43eac3d2cbd3b74bde4ac98c01d1c07
| 1,459
|
py
|
Python
|
venv/lib/python3.6/site-packages/examples/example_dnssec_settings.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 465
|
2016-05-07T00:22:59.000Z
|
2022-03-31T08:36:24.000Z
|
venv/lib/python3.6/site-packages/examples/example_dnssec_settings.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 129
|
2016-05-17T08:00:15.000Z
|
2022-03-31T23:09:36.000Z
|
venv/lib/python3.6/site-packages/examples/example_dnssec_settings.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 167
|
2016-05-09T16:19:27.000Z
|
2022-03-31T07:19:18.000Z
|
#!/usr/bin/env python
"""Cloudflare API code - example"""
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import CloudFlare
def main():
"""Cloudflare API code - example"""
# Grab the first argument, if there is one
try:
zone_name = sys.argv[1]
params = {'name':zone_name, 'per_page':1}
except IndexError:
params = {'per_page':1}
cf = CloudFlare.CloudFlare()
# grab the zone identifier
try:
zones = cf.zones.get(params=params)
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones.get %d %s - api call failed' % (e, e))
except Exception as e:
exit('/zones.get - %s - api call failed' % (e))
# there should only be one zone
for zone in sorted(zones, key=lambda v: v['name']):
zone_name = zone['name']
zone_id = zone['id']
# grab the DNSSEC settings
try:
settings = cf.zones.dnssec.get(zone_id)
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones.dnssec.get %d %s - api call failed' % (e, e))
print(zone_id, zone_name)
# display every setting value
for setting in sorted(settings):
print('\t%-30s %10s = %s' % (
setting,
'(editable)' if setting == 'status' else '',
settings[setting]
))
print('')
exit(0)
if __name__ == '__main__':
main()
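# Usage sketch (added note, not in the original example): run as
#   python example_dnssec_settings.py example.com
# with Cloudflare credentials configured the way the python-cloudflare library
# expects them (e.g. CF_API_EMAIL/CF_API_KEY environment variables or a
# ~/.cloudflare/cloudflare.cfg file).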
| 26.053571
| 70
| 0.566141
|
d81024c9a2a76620ab66416314477f99560e3429
| 36,455
|
py
|
Python
|
pydevices/RfxDevices/CAENDT5720.py
|
orozda/mdsplus
|
101265b0f94554e06c0e461b59862a6d3b656f8c
|
[
"BSD-2-Clause"
] | null | null | null |
pydevices/RfxDevices/CAENDT5720.py
|
orozda/mdsplus
|
101265b0f94554e06c0e461b59862a6d3b656f8c
|
[
"BSD-2-Clause"
] | null | null | null |
pydevices/RfxDevices/CAENDT5720.py
|
orozda/mdsplus
|
101265b0f94554e06c0e461b59862a6d3b656f8c
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from MDSplus import Device, Data, Window, Range, Dimension, TreePath
from MDSplus import Int32, Float32, Float64
from threading import Thread, Condition
from ctypes import CDLL, c_int, c_short, c_long, byref, Structure, c_void_p, c_char_p
from time import sleep
class CAENDT5720(Device):
"""CAEN DT5720 4 Channels 12 Bit 250MS/S Digitizer"""
parts = [
{'path': ':BOARD_ID', 'type': 'numeric', 'value': 0},
{'path': ':COMMENT', 'type': 'text'},
{'path': ':TRIG_MODE', 'type': 'text', 'value': 'OVER THRESHOLD'},
{'path': ':TRIG_SOFT', 'type': 'text', 'value': 'ENABLED'},
{'path': ':TRIG_EXT', 'type': 'text', 'value': 'ENABLED'},
{'path': ':TRIG_SOURCE', 'type': 'numeric'},
{'path': ':CLOCK_MODE', 'type': 'text', 'value': '250 MHz'},
{'path': ':CLOCK_SOURCE', 'type': 'numeric'},
{'path': ':NUM_SEGMENTS', 'type': 'numeric','value': 1024},
{'path': ':USE_TIME', 'type': 'text', 'value': 'YES'},
{'path': ':PTS', 'type': 'numeric','value': 1024},
{'path': ':START_IDX', 'type': 'numeric','value': 0},
{'path': ':END_IDX', 'type': 'numeric','value': 1024},
{'path': ':START_TIME', 'type': 'numeric','value':0},
{'path': ':END_TIME', 'type': 'numeric','value':1E-6},
{'path': ':ACQ_MODE', 'type': 'text','value': 'TRANSIENT RECORDER'},
{'path': ':IRQ_EVENTS', 'type': 'numeric','value': 0},
]
for i in range(4):
parts.extend([
{'path': '.CHANNEL_%d'%(i+1), 'type': 'structure'},
{'path': '.CHANNEL_%d:STATE'%(i+1), 'type': 'text', 'value': 'ENABLED'},
{'path': '.CHANNEL_%d:TRIG_STATE'%(i+1), 'type': 'text',
'value': 'DISABLED'},
{'path': '.CHANNEL_%d:OFFSET'%(i+1), 'type': 'numeric',
'value': 0},
{'path': '.CHANNEL_%d:DAC_OFFSET'%(i+1), 'type': 'numeric',
'value': 0},
{'path': '.CHANNEL_%d:THRESH_LEVEL'%(i+1), 'type': 'numeric',
'value': 0},
{'path': '.CHANNEL_%d:THRESH_SAMPL'%(i+1), 'type':'numeric',
'value': 0},
{'path': '.CHANNEL_%d:DATA'%(i+1), 'type': 'signal'},
{'path': '.CHANNEL_%d:SEG_RAW'%(i+1), 'type': 'signal'},
])
del(i)
parts.extend([
{'path': ':INIT_ACTION','type': 'action',
'valueExpr': ("Action(Dispatch('PXI_SERVER','INIT',50,None),"
"Method(None,'init',head))"),
'options': ('no_write_shot',)},
{'path': ':START_ACTION','type': 'action',
'valueExpr': ("Action(Dispatch('PXI_SERVER','STORE',50,None),"
"Method(None,'start_store',head))"),
'options': ('no_write_shot',)},
{'path': ':STOP_ACTION','type':'action',
'valueExpr': ("Action(Dispatch('PXI_SERVER','STORE',50,None),"
"Method(None,'stop_store',head))"),
'options': ('no_write_shot',)},
{'path': ':NUM_CHANNELS', 'type': 'numeric','value': 0},
])
cvV1718 = 0 # CAEN V1718 USB-VME bridge
cvV2718 = 1 # V2718 PCI-VME bridge with optical link
cvA2818 = 2 # PCI board with optical link
cvA2719 = 3 # Optical link piggy-back
cvA32_S_DATA = 0x0D # A32 supervisory data access
cvD32 = 0x04 # D32
cvD64 = 0x08
MEM_512kS = 524288
MEM_4MS = 4194304
MEM_1_25MS = 1310720
MEM_10MS = 10485760
MEM_1MS = 1048576
InternalFrequency = 250.E6
HANDLE_RESTORE = 1
HANDLE_OPEN = 2
caenLib = None
caenInterfaceLib = None
caenHandles = {}
caenCvs = {}
caenReadCvs = {}
caenWorkers = {}
caenNids = {}
#Support Class for IRQ Wait
class IRQWait(Thread):
def configure(self, handle, cv, readCv, usbLink):
self.handle = handle
self.cv = cv
self.readCv = readCv
self.usbLink = usbLink
def run(self):
while True:
self.readCv.acquire()
self.readCv.wait()
self.readCv.release()
vmeAddress = 0
acqStatus = c_int(0)
if not self.usbLink:
print ('waiting IRQ')
CAENDT5720.caenLib.CAENVME_IRQWait(self.handle, c_long(0x01), c_long(1000000))
print ('IRQ Received')
else:
print ('waiting spinlock')
#status = CAENDT5720.caenLib.CAENVME_IRQWait(self.handle, c_long(0x0), c_long(1000000))
while True :
CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x8104), byref(acqStatus), c_int(CAENDT5720.cvA32_S_DATA), c_int(CAENDT5720.cvD32))
if (acqStatus.value) & (1 << 4) :
print ('Data available ')
break
else :
sleep(1e-4) # 100us
self.cv.acquire()
self.cv.notify()
self.cv.release()
#end class IRQWait
#Support class for continuous store
class AsynchStore(Thread):
cvV1718 = 0 # CAEN V1718 USB-VME bridge
cvV2718 = 1 # V2718 PCI-VME bridge with optical link
cvA2818 = 2 # PCI board with optical link
cvA2719 = 3 # Optical link piggy-back
cvA32_S_DATA = 0x0D # A32 supervisory data access
cvD32 = 0x04 # D32
cvD64 = 0x08
#def configure(self, handle, acqMode, startIdx, endIdx, pts, actChans, nActChans, dt, trigTime, triggerSourceNid, segmentSamples, segmentSize, chanMask, nid, device, cv, readCv, useCounter, irqEvents):
def configure(self, handle, acqMode, startIdx, endIdx, pts, actChans, nActChans, dt, triggerSourceNid, segmentSamples, segmentSize, chanMask, nid, device, cv, readCv, useCounter, irqEvents):
self.handle = handle
self.startIdx = startIdx
self.endIdx = endIdx
self.acqMode = acqMode
self.pts = pts
self.actChans = actChans
self.nActChans = nActChans
self.dt = dt
"""
self.trigTime = trigTime
"""
self.segmentSamples = segmentSamples
self.segmentSize = segmentSize
self.chanMask = chanMask
self.nid = nid
self.device = device
self.cv = cv
self.readCv = readCv
self.useCounter = useCounter
self.irqEvents = irqEvents
self.triggerSourceNid = triggerSourceNid
self.saveList = c_void_p(0)
def run(self):
class DT5720Data(Structure):
_fields_ = [("eventSize", c_int), ("boardGroup", c_int), ("counter", c_int), ("time", c_int), ("data", c_short * (self.segmentSamples * self.nActChans))]
treePtr = c_void_p(0)
status = CAENDT5720.caenInterfaceLib.openTree(c_char_p(self.device.getTree().name), c_int(self.device.getTree().shot), byref(treePtr))
CAENDT5720.caenInterfaceLib.startSave(byref(self.saveList))
vmeAddress = 0
self.endIdx -= 10
# currStartIdx = self.segmentSamples - self.pts + self.startIdx
# currEndIdx = self.segmentSamples - self.pts + self.endIdx
# currChanSamples = currEndIdx - currStartIdx
numChannels = self.device.num_channels.data()
clockNid = self.device.clock_source.getNid()
triggNid = self.device.trig_source.getNid()
numTrigger = 0
channels = []
chanNid = []
if self.acqMode == "TRANSIENT RECORDER":
numTrigger = len(self.device.trig_source.getData())
else :
#continuous
numTrigger = -1
for chan in range(0,numChannels):
channels.append([])
chanNid.append( getattr(self.device, 'channel_%d_seg_raw'%(chan+1)).getNid() )
chanNid_c = (c_int * len(chanNid) )(*chanNid)
# currSegmentIdx = 0
segmentCounter = 0
self.dtArray = []
while not self.stopReq:
self.readCv.acquire()
self.readCv.notify()
self.readCv.release()
self.cv.acquire()
#print 'WAIT CONDITION'
self.cv.wait()
self.cv.release()
#print 'CONDITION ISSUED'
# Read number of buffers
actSegments = c_int(0)
status = CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x812C), byref(actSegments), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
print('Number of buffers ', actSegments)
if status != 0:
print ('Error reading number of acquired segments')
continue
segmentCounter = CAENDT5720.caenInterfaceLib.readAndSaveSegments(self.handle, c_int(vmeAddress), c_int(numChannels), c_int(self.nActChans), c_int(self.segmentSamples), c_int(self.segmentSize),
c_int(self.startIdx), c_int(self.endIdx), c_int(self.pts), c_int(self.useCounter), c_int(self.chanMask), c_int(segmentCounter),
c_int(numTrigger), chanNid_c, clockNid, triggNid, treePtr, self.saveList)
if self.acqMode == "TRANSIENT RECORDER" and segmentCounter == numTrigger :
print ('Transient Recorder acquisition completed!!!!')
return 0
if self.stopReq :
print ('ASYNCH STORE EXITED!!!!')
return 0
status = CAENDT5720.caenLib.CAENVME_IRQEnable(self.handle, c_int(0x01))
#endwhile self.stopReq == 0:
return 0
def stop(self):
self.stopReq = True
self.cv.acquire()
self.cv.notify()
self.cv.release()
#need to wait a while
sleep(0.5)
CAENDT5720.caenInterfaceLib.stopSave(self.saveList)
self.saveList = c_void_p(0)
#end class AsynchStore
def saveInfo(self):
#CAENDT5720.caenNids
CAENDT5720.caenHandles[self.getNid()] = self.handle
CAENDT5720.caenCvs[self.getNid()] = self.cv
CAENDT5720.caenReadCvs[self.getNid()] = self.readCv
# If worker is running stop it
# Worker is saved by saveWorker
try:
CAENDT5720.caenWorkers[self.getNid()].stop()
CAENDT5720.caenWorkers[self.getNid()].stopReq = True
except:
pass
def restoreInfo(self):
#global caenHandles
#global caenCvs
#global caenWorkers
#global nids
if CAENDT5720.caenLib is None:
CAENDT5720.caenLib = CDLL("libCAENVME.so")
try:
CAENDT5720.caenLib
except:
print ('Error loading library libCAENVME.so')
return 0
if CAENDT5720.caenInterfaceLib is None:
CAENDT5720.caenInterfaceLib = CDLL("libCaenInterface.so")
try:
CAENDT5720.caenInterfaceLib
except:
print ('Error loading library libCaenInterface.so')
return 0
try:
#idx = caenNids.index(self.getNid())
self.handle = CAENDT5720.caenHandles[self.getNid()]
self.cv = CAENDT5720.caenCvs[self.getNid()]
self.readCv = CAENDT5720.caenReadCvs[self.getNid()]
#self.worker = CAENDT5720.caenWorkers[self.getNid()]
return self.HANDLE_RESTORE
except:
try:
boardId = self.board_id.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid Board ID specification')
return 0
self.handle = c_long(0)
print ('HANDLE NOT FOUND INITIALIZE CAEN MODULE')
#status = caenLib.CAENVME_Init(c_int(self.cvV2718), c_int(0), c_int(boardId), byref(self.handle))
#Device VMEDevice (V3718 card) is 0; BOARDID is instead VMELink, from 0 to 3 for the V3718 4-link card
##status = CAENDT5720.caenLib.CAENVME_Init(c_int(self.cvV2718), c_int(boardId), c_int(0), byref(self.handle))
# boardId < 10 fiber optic link
# boardId >= 10 USB link, board identifier = boardId - 10
if boardId < 10 :
status = CAENDT5720.caenLib.CAENVME_Init(c_int(self.cvV2718), c_int(boardId), c_int(0), byref(self.handle))
else :
status = CAENDT5720.caenLib.CAENVME_Init(c_int(self.cvV1718), c_int(boardId-10), c_int(0), byref(self.handle))
if status != 0:
print ('Error initializing CAENVME')
return 0
self.cv = Condition()
self.readCv = Condition()
IRQw = self.IRQWait()
IRQw.daemon = True
IRQw.configure(self.handle, self.cv, self.readCv, (boardId >= 10) )
IRQw.start()
print ('CAEN MODULE OPENED')
return self.HANDLE_OPEN
################################### Worker Management
def saveWorker(self):
CAENDT5720.caenWorkers[self.getNid()] = self.worker
def restoreWorker(self):
try:
if self.getNid() in CAENDT5720.caenWorkers.keys():
self.worker = CAENDT5720.caenWorkers[self.getNid()]
except:
print('Cannot restore worker!!')
################################# INIT ###############################
def init(self):
if self.restoreInfo() == 0 :
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot open DT5720 Device' )
return 0
vmeAddress = 0
#Module Reset
data = c_int(0)
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0xEF24), byref(data), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error resetting DT5720 Device' )
CAENDT5720.caenLib.CAENVME_End(self.handle)
return 0
#give some time
sleep(0.1)
#Module type
devType = c_int(0)
status = CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x8140), byref(devType), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error reading board info')
CAENDT5720.caenLib.CAENVME_End(self.handle)
return 0
if (devType.value & 0x000000FF) != 0 and (devType.value & 0x000000FF) != 0x3:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid board type. Device must be DT5720 model')
return 0
if (devType.value & 0x000000FF) == 0 :
if (devType.value & 0x0000FF00) >> 8 == 0x01 :
self.chanMemory = self.MEM_512kS
else:
self.chanMemory = self.MEM_4MS
if (devType.value & 0x000000FF) == 0x3 :
if (devType.value & 0x0000FF00) >> 8 == 0x2 :
#self.chanMemory = self.MEM_1_25MS
self.chanMemory = self.MEM_1MS
else:
self.chanMemory = self.MEM_10MS
print ('Channel Memory: ', self.chanMemory)
numChannels = devType.value >> 16
print ('DevType code: ', devType.value)
print ('NUM CHANNELS: ', numChannels)
self.num_channels.putData(numChannels)
"""
print "write decimation factor. Not Yet implemented"
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8044), byref(c_int(0x2)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing decimation' )
return 0
"""
#Number of segments
segmentDict = {1:0, 2:1, 4:2, 8:3, 16:4, 32:5, 64:6, 128:7, 256:8, 512:9, 1024:10}
try:
nSegments=self.num_segments.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid Number of Segments')
return 0
segmentCode = segmentDict[nSegments]
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x800c), byref(c_int(segmentCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
#print "Buffer Organization 0x800C ", segmentCode
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing number of segments' )
return 0
#Global Channel Configuration
trigModeDict = {'OVER THRESHOLD':0, 'UNDER THRESHOLD':1}
try:
trigMode = self.trig_mode.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid Trigger mode')
return 0
trigModeCode = trigModeDict[trigMode]
conf = trigModeCode << 6
conf = conf | 0x00000010
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8000), byref(c_int(conf)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing group configuration')
return 0
#Channel configurations
trigEnableCode = 0
chanEnableCode = 0
enabledDict = {'ENABLED':1, 'DISABLED':0}
numChannels = self.num_channels.data()
for chan in range(0,numChannels):
#Empty the node that will contain the segmented data
getattr(self, 'channel_%d_seg_raw'%(chan+1)).deleteData()
#Set threshold level
threshold = getattr(self, 'channel_%d_thresh_level'%(chan+1)).data()
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x1080 + chan * 0x100), byref(c_int(threshold)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing threshold level')
return 0
#Set threshold samples
threshSamples = getattr(self, 'channel_%d_thresh_sampl'%(chan+1)).data()
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x1084 + chan * 0x100), byref(c_int(threshSamples)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing threshold samples')
return 0
#Read FIRMWARE info
"""
firmware = c_uint(0)
status = CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x108C + chan * 0x100), byref(firmware), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
print "firmware AMC FPGA Addr ", hex(vmeAddress + 0x108C + chan * 0x100), hex((firmware.value >> 16) & 0x0000ffff), " Version ", hex((firmware.value >> 8) & 0x000000ff), ".", hex((firmware.value ) & 0x000000ff)
"""
dac_offset = getattr(self, 'channel_%d_dac_offset'%(chan+1)).data()
#Channel offset compensation
try:
offset = getattr(self, 'channel_%d_offset'%(chan+1)).data()
except:
offset = 0
#Set offset
offset = offset + dac_offset
print ('Ch ', chan ,'Offset Volt = ',offset)
if(offset > 1.125):
offset = 1.125
if(offset < -1.125):
offset = -1.125
offset = (offset / 1.125) * 32767
print ('Ch ', chan ,'Offset Val. =', int(offset))
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x1098 + chan * 0x100), byref(c_int(int(offset + 0x08000))), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing DAC offset')
return 0
#Enable channel
state = getattr(self, 'channel_%d_state'%(chan+1)).data()
chanEnableCode = chanEnableCode | (enabledDict[state] << chan)
#Enable Trigger
trigState = getattr(self, 'channel_%d_trig_state'%(chan+1)).data()
trigEnableCode = trigEnableCode | (enabledDict[trigState] << chan)
#END channel configuration loop
#Set channel enabled mask
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8120), byref(c_int(chanEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing Channel enable register')
return 0
#Set channel trigger mask
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x810C), byref(c_int(trigEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing Channel trigger enable register')
return 0
#Set trigger enabling
trigExt = self.trig_ext.data()
trigEnableCode = trigEnableCode | (enabledDict[trigExt] << 30)
trigSoft = self.trig_soft.data()
trigEnableCode = trigEnableCode | (enabledDict[trigSoft] << 31)
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x810C), byref(c_int(trigEnableCode)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error writing trigger configuration')
return 0
#Front Panel trigger out setting set TRIG/CLK to TTL
data = 1
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x811C), byref(c_int(data)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
#Configure trigger source
"""
try:
trigSource = self.trig_source.data()
#Trigger source must be an array, consider only the first element as triggerSource time
if len(self.trig_source.getShape()) > 0:
trigSource = trigSource[0]
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot resolve Trigger source')
return 0
"""
#Configure clock source
# The clock source can be only INTERNAL
clockMode = self.clock_mode.data()
if clockMode == 'EXTERNAL':
try:
clockSource = self.clock_source.evaluate()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot resolve Clock source')
return 0
else:
clockSource = Range(None, None, Float64(1/self.InternalFrequency))
self.clock_source.putData(clockSource)
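#Note (added comment): with InternalFrequency = 250 MHz this builds an MDSplus
#Range whose delta is 1/250e6 s = 4 ns, i.e. the sample period of the
#digitizer's internal clock.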
#Configure Post Trigger Samples
try:
pts = int( self.pts.data() )
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot resolve PTS Samples')
return 0
segmentSize = self.chanMemory // nSegments
if pts > segmentSize:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'PTS Larger than segmentSize')
return 0
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8114), byref(c_int(pts>>1)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
#Time management compute endIdx and startIdx
useTime=self.use_time.data()
if useTime == 'YES':
try:
#Start and End Index burst-acquisition calculation is performed with the trigger time set to 0
trigSource = 0.
startTime = self.start_time.data()
endTime = self.end_time.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot Read Start or End time')
return 0
if endTime > 0:
endIdx = Data.execute('x_to_i($1, $2)', Dimension(Window(0, segmentSize, trigSource), clockSource), Float64(endTime + trigSource))
else:
endIdx = -Data.execute('x_to_i($1,$2)', Dimension(Window(0, segmentSize, trigSource + endTime), clockSource), Float64(trigSource))
self.end_idx.putData(Int32(int(endIdx + 0.5)))
if startTime > 0:
startIdx = Data.execute('x_to_i($1, $2)', Dimension(Window(0, segmentSize, trigSource), clockSource), startTime + trigSource)
else:
startIdx = -Data.execute('x_to_i($1,$2)', Dimension(Window(0, segmentSize, trigSource + startTime), clockSource), trigSource)
self.start_idx.putData(Int32(int(startIdx + 0.5)))
currStartIdx = int(segmentSize - pts + startIdx.data())
if currStartIdx < 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid segment size/pre-trigger samples')
return 0
currEndIdx = int(segmentSize - pts + endIdx.data())
if currEndIdx >= segmentSize:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid segment size/post-trigger samples')
return 0
print ('startIdx : ', int(startIdx))
print ('endIdx : ', int(endIdx))
print ('SEGMENT SIZE : ', int(segmentSize))
print ('PTS : ', pts)
print ('currStartIdx : ', currStartIdx)
print ('currEndIdx : ', currEndIdx)
usbLink = ( self.board_id.data() >= 10 )
acqMode = self.acq_mode.data()
if not usbLink and (acqMode == 'CONTINUOUS' or acqMode == 'CONTINUOUS WITH COUNTER' or acqMode == 'TRANSIENT RECORDER') :
irqEvents = self.irq_events.data()
irqEvents = irqEvents - 1
if irqEvents < 1:
irqEvents = 1
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0xEF18), byref(c_int(irqEvents)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error setting IRQ events')
return 0
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0xEF00), byref(c_int(0x09)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error setting IRQ line')
return 0
status = CAENDT5720.caenLib.CAENVME_IRQEnable(self.handle, c_int(0x01))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error Enabling IRQ')
return 0
#Start asynchronous readout thread
#self.start_store()
#endif acqMode == 'CONTINUOUS SAMPLING'
self.saveInfo()
return 1
################################ TRIGGER ###################################
def trigger(self):
if ( self.restoreInfo() != self.HANDLE_RESTORE and self.worker.stopReq == True ) :
Data.execute('DevLogErr($1,$2)', self.getNid(), 'DT5720 Device not initialized' )
return 0
try:
vmeAddress = 0
#Module SW trigger
# data = c_int(0)
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in software trigger DT5720 Device' )
return 0
return 1
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Generic SW trigger Error' )
return 0
################################# START STORE ###############################
def start_store(self):
if ( self.restoreInfo() != self.HANDLE_RESTORE ) :
Data.execute('DevLogErr($1,$2)', self.getNid(), 'DT5720 Device not initialized' )
return 0
vmeAddress = 0
#Module type
devType = c_int(0)
status = CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x8140), byref(devType), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error reading board info')
return 0
"""
if (devType.value & 0x000000FF) != 0 :
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid board type. Device must be DT5720 model')
return 0
if (devType.value & 0x0000FF00) >> 8 == 0x01 :
self.chanMemory = self.MEM_512kS
else:
self.chanMemory = self.MEM_4MS
"""
if (devType.value & 0x000000FF) != 0 and (devType.value & 0x000000FF) != 0x3:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid board type. Device must be DT5720 model')
return 0
if (devType.value & 0x000000FF) == 0 :
if (devType.value & 0x0000FF00) >> 8 == 0x01 :
self.chanMemory = self.MEM_512kS
else:
self.chanMemory = self.MEM_4MS
if (devType.value & 0x000000FF) == 0x3 :
if (devType.value & 0x0000FF00) >> 8 == 0x2 :
#self.chanMemory = self.MEM_1_25MS
self.chanMemory = self.MEM_1MS
else:
self.chanMemory = self.MEM_10MS
try:
self.board_id.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Invalid Board ID specification')
return 0
try:
clock = self.clock_source.evaluate()
dt = clock.getDelta().data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error evaluating clock source' )
return 0
try:
triggerSourceNid = TreePath(self.trig_source.getFullPath())
#trigTime = self.trig_source.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error evaluating trigger source' )
return 0
try:
startIdx = self.start_idx.data()
endIdx = self.end_idx.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error evaluating start or end idx')
return 0
try:
pts = self.pts.data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error evaluating Post Trigger Samples' )
return 0
#Compute Segment Size
try:
nSegments = self.num_segments.data()
segmentSamples = self.chanMemory // nSegments
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error reading max number of segments' )
return 0
# currStartIdx = segmentSamples - pts + startIdx
# currEndIdx = segmentSamples - pts + endIdx
#Get Active channels
chanMask = c_int(0)
status = CAENDT5720.caenLib.CAENVME_ReadCycle(self.handle, c_int(vmeAddress + 0x8120), byref(chanMask), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
nActChans = 0
chanMask = chanMask.value
numChannels = self.num_channels.data()
for chan in range(0,numChannels):
if (chanMask & (1 << chan)) != 0:
nActChans = nActChans + 1
if nActChans == 0:
print ('No active channels')
return 1
segmentSize = 16 + 2 * segmentSamples * nActChans
acqMode = self.acq_mode.data()
for chan in range(0,numChannels):
if (chanMask & (1 << chan)) != 0:
try:
dac_offset = getattr(self, 'channel_%d_dac_offset'%(chan+1)).data()
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error reading channel DAC offset')
return 0
if acqMode == 'CONTINUOUS WITH COUNTER':
useCounter = True
data = Data.compile("2.25*($1 - 2048)/4096.+$2", TreePath(getattr(self, 'channel_%d_seg_raw'%(chan+1)).getFullPath()), dac_offset);
else:
useCounter = False
segRawPath = TreePath(getattr(self, 'channel_%d_seg_raw'%(chan+1)).getFullPath())
data = Data.compile("(2.25*( $ - 2048)/4096. + $ )", segRawPath, Float32(dac_offset) )
try:
getattr(self, 'channel_%d_data'%(chan+1)).putData(data)
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error Writing data' )
return 0
#endfor chan in range(0,numChannels):
self.worker = self.AsynchStore()
self.worker.daemon = True
self.worker.stopReq = False
#self.worker.configure(self.handle, acqMode, startIdx, endIdx, pts, chanMask, nActChans, dt, trigTime, triggerSourceNid, segmentSamples, segmentSize, chanMask, self.getNid(), self, self.cv, self.readCv, useCounter, self.irq_events.data() + 1)
self.worker.configure(self.handle, acqMode, startIdx, endIdx, pts, chanMask, nActChans, dt, triggerSourceNid, segmentSamples, segmentSize, chanMask, self.getNid(), self, self.cv, self.readCv, useCounter, self.irq_events.data() + 1)
try:
runCommand = 4
"""
#External clock not yet implemented
if clockMode == 'EXTERNAL':
runCommand = runCommand | 0x00000040
"""
#Module SW trigger
data = c_int(0)
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8100), byref(c_int(runCommand)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error starting acquisition on DT5720 Device' )
return 0
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot start acquisition on DT5720 Device (SW exception)' )
return 0
self.saveWorker()
self.worker.start()
"""
try:
if acqMode == 'TRANSIENT RECORDER':
trigSoft = self.trig_soft.data()
if trigSoft == 'ENABLED':
trigSource = self.trig_source.data()
t0 = trigSource[0]
sleep(t0)
print "SW Trigger ", trigSource[0]
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in software trigger DT5720 Device' )
return 0
if len(trigSource) == 1 :
sleep( 1 )
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in software trigger(1) DT5720 Device' )
return 0
for delay in trigSource[1 : ] :
sleep( delay - t0 )
t0 = delay
print "SW Trigger ", delay
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8108), byref(c_int(0L)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error in software trigger DT5720 Device' )
return 0
except:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Cannot start acquisition on DT5720 Device (SW exception)' )
return 0
"""
return 1
#################################### STOP STORE ###################################
def stop_store(self):
if self.restoreInfo() != self.HANDLE_RESTORE :
Data.execute('DevLogErr($1,$2)', self.getNid(), 'DT5720 Device not initialized' )
return 0
vmeAddress = 0
#Stop device
status = CAENDT5720.caenLib.CAENVME_WriteCycle(self.handle, c_int(vmeAddress + 0x8100), byref(c_int(0)), c_int(self.cvA32_S_DATA), c_int(self.cvD32))
if status != 0:
Data.execute('DevLogErr($1,$2)', self.getNid(), 'Error stopping device')
return 0
#need to wait a while
sleep(0.5)
self.restoreWorker()
if self.worker.is_alive():
print ("PXI CAENDT5720 stop_worker")
self.worker.stop()
del self.worker
return 1
| 40.10451
| 248
| 0.600521
|
ee269436a0a76c7351b420811a929b16e0ed781d
| 772
|
py
|
Python
|
EngLearner/users/migrations/0007_auto_20200330_2201.py
|
jiangyifan123/EngLearner
|
a54c205a6e6bf3b2af366b56a7b7f97344fa1466
|
[
"bzip2-1.0.6"
] | null | null | null |
EngLearner/users/migrations/0007_auto_20200330_2201.py
|
jiangyifan123/EngLearner
|
a54c205a6e6bf3b2af366b56a7b7f97344fa1466
|
[
"bzip2-1.0.6"
] | null | null | null |
EngLearner/users/migrations/0007_auto_20200330_2201.py
|
jiangyifan123/EngLearner
|
a54c205a6e6bf3b2af366b56a7b7f97344fa1466
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.2 on 2020-03-30 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20200330_2200'),
]
operations = [
migrations.AlterField(
model_name='boughtproduct',
name='quantity',
field=models.IntegerField(default=0, verbose_name='产品数量'),
),
migrations.AlterField(
model_name='shoppingcart',
name='quantity',
field=models.IntegerField(default=0, verbose_name='产品数量'),
),
migrations.AlterField(
model_name='traderecord',
name='quantity',
field=models.IntegerField(default=0, verbose_name='产品数量'),
),
]
| 26.62069
| 70
| 0.586788
|