hist.py
from __future__ import absolute_import, division, print_function
from .kwargs import KWArgs
from .. import _core
from .view import _to_view
from .axis import Axis
from .axistuple import AxesTuple
from .sig_tools import inject_signature
from .storage import Double, Storage
from .utils import cast, register, set_family, MAIN_FAMILY, CPP_FAMILY, set_module
import warnings
import copy
import numpy as np
import os
import threading
_histograms = (
_core.hist.any_double,
_core.hist.any_int64,
_core.hist.any_atomic_int64,
_core.hist.any_unlimited,
_core.hist.any_weight,
_core.hist.any_mean,
_core.hist.any_weighted_mean,
)
def _hist_or_val(other):
return other._hist if hasattr(other, "_hist") else other
def _arg_shortcut(item):
msg = "Developer shortcut: will be removed in a future version"
if isinstance(item, tuple) and len(item) == 3:
warnings.warn(msg, FutureWarning)
return _core.axis.regular_uoflow(item[0], item[1], item[2], None)
elif isinstance(item, tuple) and len(item) == 4:
warnings.warn(msg, FutureWarning)
return _core.axis.regular_uoflow(*item)
elif isinstance(item, Axis):
return item._ax
else:
raise TypeError("Only axes supported in histogram constructor")
# TODO: Currently segfaults if we pass in a non-axis to the C++ code
# Using the public interface above, this should never be possible.
def _expand_ellipsis(indexes, rank):
indexes = list(indexes)
number_ellipses = indexes.count(Ellipsis)
if number_ellipses == 0:
return indexes
elif number_ellipses == 1:
index = indexes.index(Ellipsis)
additional = rank + 1 - len(indexes)
if additional < 0:
raise IndexError("too many indices for histogram")
# Fill out the ellipsis with empty slices
return indexes[:index] + [slice(None)] * additional + indexes[index + 1 :]
else:
raise IndexError("an index can only have a single ellipsis ('...')")
# We currently do not cast *to* a histogram, but this is consistent
# and could be used later.
@register(_histograms)
class BaseHistogram(object):
@inject_signature("self, *axes, storage=Double()", locals={"Double": Double})
def __init__(self, *axes, **kwargs):
"""
Construct a new histogram.
        If you pass in a single argument, it will be treated as a
        histogram and converted to this type of histogram
        (DensityHistogram, Histogram, BoostHistogram).
Parameters
----------
        *axes : Axis
Provide 1 or more axis instances.
storage : Storage = bh.storage.Double()
Select a storage to use in the histogram
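        Examples
        --------
        A minimal illustrative sketch, assuming the top-level package is
        importable as ``boost_histogram`` and exposes ``Histogram``, a
        regular axis type, and ``storage.Double``:
        >>> import boost_histogram as bh
        >>> h = bh.Histogram(bh.axis.Regular(10, 0.0, 1.0),
        ...                  storage=bh.storage.Double())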
"""
# Allow construction from a raw histogram object (internal)
if not kwargs and len(axes) == 1 and isinstance(axes[0], _histograms):
self._hist = axes[0]
return
if not kwargs and len(axes) == 1 and isinstance(axes[0], BaseHistogram):
self._hist = copy.copy(axes[0]._hist)
return
# Keyword only trick (change when Python2 is dropped)
with KWArgs(kwargs) as k:
storage = k.optional("storage", Double())
        # Check for missing parentheses or incorrect types
if not isinstance(storage, Storage):
if issubclass(storage, Storage):
raise KeyError(
"Passing in an initialized storage has been removed. Please add ()."
)
else:
raise KeyError("Only storages allowed in storage argument")
# Allow a tuple to represent a regular axis
axes = [_arg_shortcut(arg) for arg in axes]
if len(axes) > _core.hist._axes_limit:
raise IndexError(
"Too many axes, must be less than {}".format(_core.hist._axes_limit)
)
# Check all available histograms, and if the storage matches, return that one
for h in _histograms:
if isinstance(storage, h._storage_type):
self._hist = h(axes, storage)
return
raise TypeError("Unsupported storage")
def __array__(self):
return _to_view(self._hist.view(False))
def __add__(self, other):
return self.__class__(self._hist.__add__(other._hist))
def __iadd__(self, other):
self._hist.__iadd__(other._hist)
return self
def __eq__(self, other):
return self._hist == other._hist
def __ne__(self, other):
return self._hist != other._hist
# If these fail, the underlying object throws the correct error
def __mul__(self, other):
return self.__class__(self._hist.__mul__(other))
def __rmul__(self, other):
return self * other
def __imul__(self, other):
self._hist.__imul__(_hist_or_val(other))
return self
def __truediv__(self, other):
return self.__class__(self._hist.__truediv__(_hist_or_val(other)))
def __div__(self, other):
return self.__class__(self._hist.__div__(_hist_or_val(other)))
def __itruediv__(self, other):
self._hist.__itruediv__(_hist_or_val(other))
return self
def __idiv__(self, other):
self._hist.__idiv__(_hist_or_val(other))
return self
def __copy__(self):
other = self.__class__.__new__(self.__class__)
other._hist = copy.copy(self._hist)
return other
@inject_signature("self, *args, weight=None, sample=None, threads=None")
def fill(self, *args, **kwargs):
"""
Insert data into the histogram.
Parameters
----------
*args : Union[Array[float], Array[int], Array[str], float, int, str]
Provide one value or array per dimension.
        weight : List[Union[Array[float], Array[int], Array[str], float, int, str]]
            Provide weights (only if the histogram storage supports it)
        sample : List[Union[Array[float], Array[int], Array[str], float, int, str]]
            Provide samples (only if the histogram storage supports it)
threads : Optional[int]
Fill with threads. Defaults to None, which does not activate
threaded filling. Using 0 will automatically pick the number of
available threads (usually two per core).
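        Examples
        --------
        A minimal illustrative sketch; ``h`` is a hypothetical 1D histogram,
        and the weighted fill assumes a storage that accepts weights:
        >>> import numpy as np
        >>> data = np.random.normal(size=10000)
        >>> h.fill(data)
        >>> h.fill(data, weight=np.ones_like(data), threads=4)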
"""
threads = kwargs.pop("threads", None)
        if threads is None or threads == 1:
self._hist.fill(*args, **kwargs)
else:
if threads == 0:
threads = os.cpu_count()
if (
self._hist._storage_type is _core.storage.mean
or self._hist._storage_type is _core.storage.weighted_mean
):
raise RuntimeError("Mean histograms do not support threaded filling")
weight = kwargs.pop("weight", None)
sample = kwargs.pop("sample", None)
data = [np.array_split(a, threads) for a in args]
if weight is None or np.isscalar(weight):
weights = [weight] * threads
else:
weights = np.array_split(weight, threads)
if sample is None or np.isscalar(sample):
samples = [sample] * threads
else:
samples = np.array_split(sample, threads)
if self._hist._storage_type is _core.storage.atomic_int64:
def fun(weight, sample, *args):
self._hist.fill(*args, weight=weight, sample=sample)
else:
sum_lock = threading.Lock()
def fun(weight, sample, *args):
local_hist = self._hist.__copy__()
local_hist.reset()
kw = {}
if weight is not None:
kw["weight"] = weight
if sample is not None:
kw["sample"] = sample
local_hist.fill(*args, **kw)
with sum_lock:
self._hist += local_hist
thread_list = [
threading.Thread(target=fun, args=arrays)
for arrays in zip(weights, samples, *data)
]
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
return self
def __repr__(self):
ret = "{self.__class__.__name__}(\n ".format(self=self)
ret += ",\n ".join(repr(self._axis(i)) for i in range(self._hist.rank()))
ret += ",\n storage={0}".format(self._storage_type())
ret += ")"
outer = self._hist.sum(flow=True)
if outer:
inner = self._hist.sum(flow=False)
ret += " # Sum: {0}".format(inner)
if inner != outer:
ret += " ({0} with flow)".format(outer)
return ret
def __str__(self):
"""
A rendering of the histogram is made using ASCII or unicode characters (whatever is supported by the terminal). What exactly is displayed is still experimental. Do not rely on any particular rendering.
"""
# TODO check the terminal width and adjust the presentation
# only use for 1D, fall back to repr for ND
if self._hist.rank() == 1:
s = str(self._hist)
# get rid of first line and last character
s = s[s.index("\n") + 1 : -1]
else:
s = repr(self)
return s
def _axis(self, i):
"""
Get N-th axis.
"""
return cast(self, self._hist.axis(i), Axis)
@property
def _storage_type(self):
return cast(self, self._hist._storage_type, Storage)
def _reduce(self, *args):
return self.__class__(self._hist.reduce(*args))
# C++ version of histogram
@set_family(CPP_FAMILY)
@set_module("boost_histogram.cpp")
class histogram(BaseHistogram):
axis = BaseHistogram._axis
def rank(self):
"""
Number of axes (dimensions) of histogram.
"""
return self._hist.rank()
def size(self):
"""
Total number of bins in the histogram (including underflow/overflow).
"""
return self._hist.size()
def at(self, *indexes):
"""
        Select the bin contents at the given indices; -1 is the underflow bin and N is the overflow bin.
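        Example (illustrative sketch; ``h`` is a hypothetical histogram):
        >>> h.at(3)       # contents of bin 3
        >>> h.at(-1)      # underflow bin
        >>> h.at(2, 5)    # bin (2, 5) of a 2D histogram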
"""
return self._hist.at(*indexes)
# Call uses fill since it supports strings,
# runtime argument list, etc.
@inject_signature("self, *args, weight=None, sample=None")
def __call__(self, *args, **kwargs):
args = (((a,) if isinstance(a, str) else a) for a in args)
self._hist.fill(*args, **kwargs)
return self
def _reset(self):
self._hist.reset()
return self
def _empty(self, flow=False):
return self._hist.empty(flow)
def _sum(self, flow=False):
return self._hist.sum(flow)
def _project(self, *args):
return self.__class__(self._hist.project(*args))
@set_family(MAIN_FAMILY)
@set_module("boost_histogram")
class Histogram(BaseHistogram):
@inject_signature("self, *axes, storage=Double()", locals={"Double": Double})
def __init__(self, *args, **kwargs):
super(Histogram, self).__init__(*args, **kwargs)
# If this is a property, tab completion in IPython does not work
self.axes = AxesTuple(self._axis(i) for i in range(self.rank))
__init__.__doc__ = BaseHistogram.__init__.__doc__
def __copy__(self):
other = super(Histogram, self).__copy__()
other.axes = AxesTuple(other._axis(i) for i in range(other.rank))
return other
def __deepcopy__(self, memo):
other = self.__class__.__new__(self.__class__)
other._hist = copy.deepcopy(self._hist, memo)
other.axes = AxesTuple(other._axis(i) for i in range(other.rank))
return other
def __getstate__(self):
state = self.__dict__.copy()
del state["axes"] # Don't save the cashe
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.axes = AxesTuple(self._axis(i) for i in range(self.rank))
def __repr__(self):
newline = "\n "
sep = "," if len(self.axes) > 0 else ""
ret = "{self.__class__.__name__}({newline}".format(
self=self, newline=newline if len(self.axes) > 1 else ""
)
ret += ",{newline}".format(newline=newline).join(repr(ax) for ax in self.axes)
ret += "{comma}{newline}storage={storage}".format(
storage=self._storage_type(),
newline=newline
if len(self.axes) > 1
else " "
if len(self.axes) > 0
else "",
comma="," if len(self.axes) > 0 else "",
)
ret += ")"
outer = self.sum(flow=True)
if outer:
inner = self.sum(flow=False)
ret += " # Sum: {0}".format(inner)
if inner != outer:
ret += " ({0} with flow)".format(outer)
return ret
def _compute_commonindex(self, index):
"""
Takes indices and returns two iterables; one is a tuple or dict of the
original, Ellipsis expanded index, and the other returns index,
operation value pairs.
"""
# Shorten the computations with direct access to raw object
hist = self._hist
# Support dict access
if hasattr(index, "items"):
indexes = [slice(None)] * hist.rank()
for k, v in index.items():
indexes[k] = v
# Normalize -> h[i] == h[i,]
else:
if not isinstance(index, tuple):
index = (index,)
# Now a list
indexes = _expand_ellipsis(index, hist.rank())
if len(indexes) != hist.rank():
raise IndexError("Wrong number of indices for histogram")
# Allow [bh.loc(...)] to work
for i in range(len(indexes)):
if callable(indexes[i]):
indexes[i] = indexes[i](cast(self, hist.axis(i), Axis))
elif hasattr(indexes[i], "flow"):
if indexes[i].flow == 1:
indexes[i] = hist.axis(i).size
elif indexes[i].flow == -1:
indexes[i] = -1
elif isinstance(indexes[i], int):
if abs(indexes[i]) >= hist.axis(i).size:
raise IndexError("histogram index is out of range")
indexes[i] %= hist.axis(i).size
return indexes
def to_numpy(self, flow=False):
"""
Convert to a Numpy style tuple of return arrays.
Return
------
contents : Array[Any]
The bin contents
*edges : Array[float]
The edges for each dimension
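        Example (illustrative sketch; ``h1d`` and ``h2d`` are hypothetical
        1D and 2D histograms):
        >>> counts, xedges = h1d.to_numpy()
        >>> counts, xedges, yedges = h2d.to_numpy()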
"""
return self._hist.to_numpy(flow)
@inject_signature("self, *, deep=True")
def copy(self, **kwargs):
"""
Make a copy of the histogram. Defaults to making a
deep copy (axis metadata copied); use deep=False
to avoid making a copy of axis metadata.
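        Example (illustrative sketch; ``h`` is a hypothetical histogram):
        >>> h2 = h.copy()            # deep copy; axis metadata duplicated
        >>> h3 = h.copy(deep=False)  # shallow copy; axis metadata shared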
"""
# Future versions may add new options here
with KWArgs(kwargs) as k:
deep = k.optional("deep", True)
if deep:
return copy.deepcopy(self)
else:
return copy.copy(self)
def view(self, flow=False):
"""
Return a view into the data, optionally with overflow turned on.
"""
return _to_view(self._hist.view(flow))
def reset(self):
"""
Reset bin counters to default values.
"""
self._hist.reset()
return self
def empty(self, flow=False):
"""
Check to see if the histogram has any non-default values.
You can use flow=True to check flow bins too.
"""
return self._hist.empty(flow)
def sum(self, flow=False):
"""
Compute the sum over the histogram bins (optionally including the flow bins).
"""
return self._hist.sum(flow)
@property
def rank(self):
"""
Number of axes (dimensions) of histogram.
"""
return self._hist.rank()
@property
def size(self):
"""
Total number of bins in the histogram (including underflow/overflow).
"""
return self._hist.size()
@property
def shape(self):
"""
Tuple of axis sizes (not including underflow/overflow).
"""
return self.axes.size
def __getitem__(self, index):
indexes = self._compute_commonindex(index)
# If this is (now) all integers, return the bin contents
# But don't try *dict!
if not hasattr(indexes, "items"):
try:
return self._hist.at(*indexes)
except RuntimeError:
pass
integrations = set()
slices = []
zeroes_start = []
zeroes_stop = []
# We could use python's sum here, but for now, a private sum is used
class ext_sum:
projection = True
# Compute needed slices and projections
for i, ind in enumerate(indexes):
if hasattr(ind, "__index__"):
ind = slice(ind.__index__(), ind.__index__() + 1, ext_sum())
elif not isinstance(ind, slice):
raise IndexError(
"Must be a slice, an integer, or follow the locator protocol."
)
if ind != slice(None):
merge = 1
if ind.step is not None:
if hasattr(ind.step, "projection"):
if ind.step.projection:
integrations.add(i)
if ind.start is not None: # TODO: Support callables too
zeroes_start.append(i)
if ind.stop is not None:
zeroes_stop.append(i)
if ind.stop is None and ind.start is None:
continue
elif hasattr(ind.step, "factor"):
merge = ind.step.factor
else:
raise IndexError("Invalid rebin, must have integer .factor")
else:
raise IndexError(
"The third argument to a slice must be rebin or projection"
)
process_loc = (
lambda x, y: y
if x is None
else x(self._axis(i))
if callable(x)
else x
)
begin = process_loc(ind.start, 0)
end = process_loc(ind.stop, len(self._axis(i)))
slices.append(_core.algorithm.slice_and_rebin(i, begin, end, merge))
reduced = self._reduce(*slices)
if not integrations:
return self.__class__(reduced)
else:
projections = [i for i in range(self.rank) if i not in integrations]
# Replacement for crop missing in BH
for i in zeroes_start:
if self.axes[i].options.underflow:
reduced._hist._reset_row(i, -1)
for i in zeroes_stop:
if self.axes[i].options.overflow:
reduced._hist._reset_row(i, reduced.axes[i].size)
return (
self.__class__(reduced.project(*projections))
if projections
else reduced.sum(flow=True)
)
def __setitem__(self, index, value):
"""
There are several supported possibilities:
h[slice] = array # same size
If an array is given to a compatible slice, it is set.
h[a:] = array # One larger
        If an array is given whose size does not match the slice but does
        match the with-overflow size, the flow bins are filled as well.
PLANNED (not yet supported):
h[a:] = h2
If another histogram is given, that must either match with or without
overflow, where the overflow bins must be overflow bins (that is,
you cannot set a histogram's flow bins from another histogram that
is 2 larger). Bin edges must be a close match, as well. If you don't
want this level of type safety, just use ``h[...] = h2.view()``.
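        Example (illustrative sketch; ``h`` is a hypothetical 1D histogram
        with 10 bins plus underflow/overflow):
        >>> import numpy as np
        >>> h[:] = np.arange(10)   # sets the 10 visible bins
        >>> h[:] = np.arange(12)   # also sets the two flow bins
        >>> h[2] = 7               # sets a single bin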
"""
indexes = self._compute_commonindex(index)
if isinstance(value, BaseHistogram):
raise TypeError("Not supported yet")
value = np.asarray(value)
view = self.view(flow=True)
# Disallow mismatched data types
if len(value.dtype) != len(view.dtype):
raise ValueError("Mismatched data types; matching types required")
# Numpy does not broadcast partial slices, but we would need
# to allow it (because we do allow broadcasting up dimensions)
# Instead, we simply require matching dimensions.
if value.ndim > 0 and value.ndim != sum(isinstance(i, slice) for i in indexes):
raise ValueError(
"Setting a {0}D histogram with a {1}D array must have a matching number of dimensions".format(
len(indexes), value.ndim
)
)
# Here, value_n does not increment with n if this is not a slice
value_n = 0
for n, request in enumerate(indexes):
has_underflow = self.axes[n].options.underflow
has_overflow = self.axes[n].options.overflow
if isinstance(request, slice):
# Only consider underflow/overflow if the endpoints are not given
use_underflow = has_underflow and request.start is None
use_overflow = has_overflow and request.stop is None
# Make the limits explicit since we may need to shift them
start = 0 if request.start is None else request.start
stop = len(self.axes[n]) if request.stop is None else request.stop
request_len = stop - start
# If set to a scalar, then treat it like broadcasting without flow bins
if value.ndim == 0:
                    start = 0 + has_underflow
                    stop = len(self.axes[n]) + has_underflow
# Normal setting
elif request_len == value.shape[value_n]:
start += has_underflow
stop += has_underflow
# Expanded setting
elif request_len + use_underflow + use_overflow == value.shape[value_n]:
start += has_underflow and not use_underflow
stop += has_underflow + (has_overflow and use_overflow)
# Single element broadcasting
elif value.shape[value_n] == 1:
start += has_underflow
stop += has_underflow
else:
msg = "Mismatched shapes in dimension {0}".format(n)
msg += ", {0} != {1}".format(value.shape[n], request_len)
if use_underflow or use_overflow:
msg += " or {0}".format(
request_len + use_underflow + use_overflow
)
raise ValueError(msg)
indexes[n] = slice(start, stop, request.step)
value_n += 1
else:
indexes[n] = request + has_underflow
view[tuple(indexes)] = value
def project(self, *args):
"""
        Project to a single axis or several axes on a multidimensional histogram.
        Given a list of axis numbers, this will produce the histogram over
        those axes only. Flow bins are used if available.
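        Example (illustrative sketch; ``h3d`` is a hypothetical 3D histogram):
        >>> h_x = h3d.project(0)      # keep only the first axis
        >>> h_xy = h3d.project(0, 1)  # keep the first two axes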
"""
return self.__class__(self._hist.project(*args))
test_state.py
"""
Tests for the state runner
"""
import errno
import logging
import os
import queue
import shutil
import signal
import tempfile
import textwrap
import threading
import time
import salt.exceptions
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.yaml
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, flaky, slowTest
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
@flaky
class StateRunnerTest(ShellCase):
"""
Test the state runner.
"""
RUN_TIMEOUT = 300
def add_to_queue(self, q, cmd):
"""
helper method to add salt-run
return data to a queue
"""
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
@slowTest
def test_orchestrate_output(self):
"""
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
        and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
"""
ret_output = self.run_run("state.orchestrate orch.simple")
bad_out = ["outputter:", " highstate"]
good_out = [
" Function: salt.state",
" Result: True",
"Succeeded: 1 (changed=1)",
"Failed: 0",
"Total states run: 1",
]
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
assert bad_out != ret_output
# Now test that some expected good sample output is present in the return.
for item in good_out:
assert item in ret_output
@slowTest
def test_orchestrate_nested(self):
"""
test salt-run state.orchestrate and failhard with nested orchestration
"""
if os.path.exists("/tmp/ewu-2016-12-13"):
os.remove("/tmp/ewu-2016-12-13")
_, code = self.run_run("state.orchestrate nested-orch.outer", with_retcode=True)
assert os.path.exists("/tmp/ewu-2016-12-13") is False
assert code != 0
@slowTest
def test_orchestrate_with_mine(self):
"""
test salt-run state.orchestrate with mine.get call in sls
"""
fail_time = time.time() + 120
self.run_run('mine.update "*"')
exp_ret = "Succeeded: 1 (changed=1)"
while True:
ret = self.run_run("state.orchestrate orch.mine")
try:
assert exp_ret in ret
break
except AssertionError:
if time.time() > fail_time:
self.fail(
'"{}" was not found in the orchestration call'.format(exp_ret)
)
@slowTest
def test_orchestrate_state_and_function_failure(self):
"""
Ensure that returns from failed minions are in the changes dict where
        they belong, so they can be programmatically analyzed.
See https://github.com/saltstack/salt/issues/43204
"""
self.run_run("saltutil.sync_modules")
ret = salt.utils.json.loads(
"\n".join(self.run_run("state.orchestrate orch.issue43204 --out=json"))
)
# Drill down to the changes dict
state_ret = ret["data"]["master"]["salt_|-Step01_|-Step01_|-state"]["changes"]
func_ret = ret["data"]["master"][
"salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function"
]["changes"]
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ("duration", "start_time"):
state_ret["ret"]["minion"][
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes"
].pop(item)
self.assertEqual(
state_ret,
{
"out": "highstate",
"ret": {
"minion": {
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes": {
"__id__": "test fail with changes",
"__run_num__": 0,
"__sls__": "orch.issue43204.fail_with_changes",
"changes": {
"testing": {
"new": "Something pretended to change",
"old": "Unchanged",
}
},
"comment": "Failure!",
"name": "test fail with changes",
"result": False,
}
}
},
},
)
self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@slowTest
def test_orchestrate_target_exists(self):
"""
test orchestration when target exists
while using multiple states
"""
ret = self.run_run("state.orchestrate orch.target-exists")
first = [" ID: core", " Function: salt.state", " Result: True"]
second = [
" ID: test-state",
" Function: salt.state",
" Result: True",
]
third = [
" ID: cmd.run",
" Function: salt.function",
" Result: True",
]
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
@slowTest
def test_orchestrate_retcode(self):
"""
Test orchestration with nonzero retcode set in __context__
"""
self.run_run("saltutil.sync_runners")
self.run_run("saltutil.sync_wheel")
ret = "\n".join(self.run_run("state.orchestrate orch.retcode"))
for result in (
" ID: test_runner_success\n"
" Function: salt.runner\n"
" Name: runtests_helpers.success\n"
" Result: True",
" ID: test_runner_failure\n"
" Function: salt.runner\n"
" Name: runtests_helpers.failure\n"
" Result: False",
" ID: test_wheel_success\n"
" Function: salt.wheel\n"
" Name: runtests_helpers.success\n"
" Result: True",
" ID: test_wheel_failure\n"
" Function: salt.wheel\n"
" Name: runtests_helpers.failure\n"
" Result: False",
):
self.assertIn(result, ret)
@slowTest
def test_orchestrate_target_does_not_exist(self):
"""
test orchestration when target doesn't exist
while using multiple states
"""
ret = self.run_run("state.orchestrate orch.target-does-not-exist")
first = [
"No minions matched the target. No command was sent, no jid was assigned.",
" ID: core",
" Function: salt.state",
" Result: False",
]
second = [
" ID: test-state",
" Function: salt.state",
" Result: True",
]
third = [
" ID: cmd.run",
" Function: salt.function",
" Result: True",
]
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
@slowTest
def test_orchestrate_batch_with_failhard_error(self):
"""
test orchestration properly stops with failhard and batch.
"""
ret = self.run_run("state.orchestrate orch.batch --out=json -l critical")
ret_json = salt.utils.json.loads("\n".join(ret))
retcode = ret_json["retcode"]
result = ret_json["data"]["master"][
"salt_|-call_fail_state_|-call_fail_state_|-state"
]["result"]
changes = ret_json["data"]["master"][
"salt_|-call_fail_state_|-call_fail_state_|-state"
]["changes"]
        # Looks like there are platform differences in execution.
        # I see an empty changes dict on macOS for some reason. Maybe it's a bug?
if changes:
changes_ret = changes["ret"]
# Debug
print("Retcode: {}".format(retcode))
print("Changes: {}".format(changes))
print("Result: {}".format(result))
assert retcode != 0
assert result is False
if changes:
# The execution should stop after first error, so return dict should contain only one minion
assert len(changes_ret) == 1
@slowTest
def test_state_event(self):
"""
test to ensure state.event
runner returns correct data
"""
q = queue.Queue(maxsize=0)
cmd = "state.event salt/job/*/new count=1"
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
server_thread.setDaemon(True)
server_thread.start()
while q.empty():
self.run_salt("minion test.ping --static")
out = q.get()
assert expect in str(out)
server_thread.join()
@slowTest
def test_orchestrate_subset(self):
"""
test orchestration state using subset
"""
ret = self.run_run("state.orchestrate orch.subset", timeout=500)
def count(thing, listobj):
return sum([obj.strip() == thing for obj in listobj])
assert count("ID: test subset", ret) == 1
assert count("Succeeded: 1", ret) == 1
assert count("Failed: 0", ret) == 1
@slowTest
def test_orchestrate_salt_function_return_false_failure(self):
"""
Ensure that functions that only return False in the return
are flagged as failed when run as orchestrations.
See https://github.com/saltstack/salt/issues/30367
"""
self.run_run("saltutil.sync_modules")
ret = salt.utils.json.loads(
"\n".join(self.run_run("state.orchestrate orch.issue30367 --out=json"))
)
# Drill down to the changes dict
state_result = ret["data"]["master"][
"salt_|-deploy_check_|-test.false_|-function"
]["result"]
func_ret = ret["data"]["master"]["salt_|-deploy_check_|-test.false_|-function"][
"changes"
]
assert state_result is False
self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@skipIf(salt.utils.platform.is_windows(), "*NIX-only test")
@flaky
class OrchEventTest(ShellCase):
"""
Tests for orchestration events
"""
RUN_TIMEOUT = 300
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master.d")
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode="w", suffix=".conf", dir=self.master_d_dir, delete=True,
)
self.base_env = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ("timeout", "master_d_dir", "conf", "base_env"):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, "test.arg")
def alarm_handler(self, signal, frame):
raise Exception("Timeout of {} seconds reached".format(self.timeout))
def write_conf(self, data):
"""
Dump the config dict to the conf file
"""
self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
self.conf.flush()
@expensiveTest
def test_jid_in_ret_event(self):
"""
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
state_sls = os.path.join(self.base_env, "test_state.sls")
with salt.utils.files.fopen(state_sls, "w") as fp_:
fp_.write(
salt.utils.stringutils.to_str(
textwrap.dedent(
"""
date:
cmd.run
"""
)
)
)
orch_sls = os.path.join(self.base_env, "test_orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
salt.utils.stringutils.to_str(
textwrap.dedent(
"""
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
"""
)
)
)
with salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
) as listener:
jid = self.run_run_plus("state.orchestrate", "test_orch").get("jid")
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event["tag"] == "salt/run/{}/ret".format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for job in ret:
self.assertTrue("__jid__" in ret[job])
break
finally:
signal.alarm(0)
@expensiveTest
def test_parallel_orchestrations(self):
"""
Test to confirm that the parallel state requisite works in orch
        we do this by running several 10-second test.sleep calls in parallel and ensuring the run only takes roughly 10s
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{% for count in range(1, 20) %}
sleep {{ count }}:
module.run:
- name: test.sleep
- length: 10
- parallel: True
{% endfor %}
sleep 21:
module.run:
- name: test.sleep
- length: 10
- parallel: True
- require:
- module: sleep 1
"""
)
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
with salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
) as listener:
start_time = time.time()
jid = self.run_run_plus("state.orchestrate", "test_par_orch").get("jid")
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
# if we receive the ret for this job before self.timeout (60),
                # the test is implicitly successful; if it were running serially it would
                # take at least 110 seconds.
if event["tag"] == "salt/run/{}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# we expect each duration to be greater than 10s
self.assertTrue(data["duration"] > 10000)
break
            # confirm that the total runtime is roughly 30s (with a 10s buffer)
self.assertTrue((time.time() - start_time) < 40)
finally:
self.assertTrue(received)
signal.alarm(0)
@expensiveTest
def test_orchestration_soft_kill(self):
"""
        Test to confirm that state.soft_kill stops an orchestration: after
        soft-killing stage_two, that stage should not run.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "two_stage_orch_kill.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
stage_one:
test.succeed_without_changes
stage_two:
test.fail_without_changes
"""
)
)
with salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
) as listener:
mock_jid = "20131219120000000000"
self.run_run("state.soft_kill {} stage_two".format(mock_jid))
with patch("salt.utils.jid.gen_jid", MagicMock(return_value=mock_jid)):
jid = self.run_run_plus("state.orchestrate", "two_stage_orch_kill").get(
"jid"
)
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
# Ensure that stage_two of the state does not run
if event["tag"] == "salt/run/{}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
self.assertNotIn(
"test_|-stage_two_|-stage_two_|-fail_without_changes", ret
)
break
finally:
self.assertTrue(received)
signal.alarm(0)
@slowTest
def test_orchestration_with_pillar_dot_items(self):
"""
        Test to confirm that when a state file includes other state files, and
        one of those included files calls pillar-related functions that do not
        pull from the pillar cache, all of the state files are still available
        and file_roots is preserved. See issues #48277 and #46986.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "main.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
include:
- one
- two
- three
"""
)
)
orch_sls = os.path.join(self.base_env, "one.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{%- set foo = salt['saltutil.runner']('pillar.show_pillar') %}
placeholder_one:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "two.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_two:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "three.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_three:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "main.sls")
with salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
) as listener:
jid = self.run_run_plus("state.orchestrate", "main").get("jid")
if jid is None:
raise salt.exceptions.SaltInvocationError(
"jid missing from run_run_plus output"
)
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event.get("tag", "") == "salt/run/{}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# Each state should be successful
self.assertEqual(data["comment"], "Success!")
break
finally:
self.assertTrue(received)
signal.alarm(0)
@slowTest
def test_orchestration_onchanges_and_prereq(self):
"""
        Test to confirm that the onchanges and prereq requisites work in orch:
        run in test mode, then for real, then in test mode again, and check
        that the requisites only fire when the managed file reports changes.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
manage_a_file:
salt.state:
- tgt: minion
- sls:
- orch.req_test
do_onchanges:
salt.function:
- tgt: minion
- name: test.ping
- onchanges:
- salt: manage_a_file
do_prereq:
salt.function:
- tgt: minion
- name: test.ping
- prereq:
- salt: manage_a_file
"""
)
)
with salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
) as listener:
try:
jid1 = self.run_run_plus("state.orchestrate", "orch", test=True).get(
"jid"
)
# Run for real to create the file
self.run_run_plus("state.orchestrate", "orch").get("jid")
# Run again in test mode. Since there were no changes, the
# requisites should not fire.
jid2 = self.run_run_plus("state.orchestrate", "orch", test=True).get(
"jid"
)
finally:
try:
os.remove(os.path.join(RUNTIME_VARS.TMP, "orch.req_test"))
except OSError:
pass
assert jid1 is not None
assert jid2 is not None
tags = {"salt/run/{}/ret".format(x): x for x in (jid1, jid2)}
ret = {}
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event["tag"] in tags:
ret[tags.pop(event["tag"])] = self.repack_state_returns(
event["data"]["return"]["data"]["master"]
)
if not tags:
# If tags is empty, we've grabbed all the returns we
# wanted, so let's stop listening to the event bus.
break
finally:
del listener
signal.alarm(0)
for sls_id in ("manage_a_file", "do_onchanges", "do_prereq"):
# The first time through, all three states should have a None
# result, while the second time through, they should all have a
# True result.
assert (
ret[jid1][sls_id]["result"] is None
), "result of {} ({}) is not None".format(
sls_id, ret[jid1][sls_id]["result"]
)
assert (
ret[jid2][sls_id]["result"] is True
), "result of {} ({}) is not True".format(
sls_id, ret[jid2][sls_id]["result"]
)
# The file.managed state should have shown changes in the test mode
# return data.
assert ret[jid1]["manage_a_file"]["changes"]
# After the file was created, running again in test mode should have
# shown no changes.
assert not ret[jid2]["manage_a_file"]["changes"], ret[jid2]["manage_a_file"][
"changes"
]
test_threading.py
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
try:
import _testcapi
except ImportError:
_testcapi = None
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
@test.test_support.cpython_only
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
@test.test_support.cpython_only
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
        self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
        stderr = re.sub(r"^\[\d+ refs\]", "", stderr, flags=re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
@test.test_support.cpython_only
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, '')
self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getcheckinterval()
# Make the bug more likely to manifest.
sys.setcheckinterval(10)
try:
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
finally:
sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@cpython_only
@unittest.skipIf(_testcapi is None, "need _testcapi module")
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "genereator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_print_exception(self):
script = r"""if 1:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if 1:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if 1:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1.0/0.0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, '')
self.assertNotIn("Unhandled exception", err)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
bootstrap.py
|
"""
Bootstrap an installation of TLJH.
Sets up just enough TLJH environments to invoke tljh.installer.
This script is run as:
curl <script-url> | sudo python3 -
Constraints:
- Entire script should be compatible with Python 3.6 (We run on Ubuntu 18.04+)
- Script should parse in Python 3.4 (since we exit with useful error message on Ubuntu 14.04+)
- Use stdlib modules only
"""
import os
from http.server import SimpleHTTPRequestHandler, HTTPServer
import multiprocessing
import subprocess
import sys
import logging
import shutil
import urllib.request
html = """
<html>
<head>
<title>The Littlest Jupyterhub</title>
</head>
<body>
<meta http-equiv="refresh" content="30" >
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<img class="logo" src="https://raw.githubusercontent.com/jupyterhub/the-littlest-jupyterhub/master/docs/images/logo/logo.png">
<div class="loader center"></div>
<div class="center main-msg">Please wait while your TLJH is building...</div>
<div class="center logs-msg">Click the button below to see the logs</div>
<div class="center tip" >Tip: to update the logs, refresh the page</div>
<button class="logs-button center" onclick="window.location.href='/logs'">View logs</button>
</body>
<style>
button:hover {
background: grey;
}
.logo {
width: 150px;
height: auto;
}
.center {
margin: 0 auto;
margin-top: 50px;
text-align:center;
display: block;
}
.main-msg {
font-size: 30px;
font-weight: bold;
color: grey;
text-align:center;
}
.logs-msg {
font-size: 15px;
color: grey;
}
.tip {
font-size: 13px;
color: grey;
margin-top: 10px;
font-style: italic;
}
.logs-button {
margin-top:15px;
border: 0;
color: white;
padding: 15px 32px;
font-size: 16px;
cursor: pointer;
background: #f5a252;
}
.loader {
width: 150px;
height: 150px;
border-radius: 90%;
border: 7px solid transparent;
animation: spin 2s infinite ease;
animation-direction: alternate;
}
@keyframes spin {
0% {
transform: rotateZ(0deg);
border-top-color: #f17c0e
}
100% {
transform: rotateZ(360deg);
border-top-color: #fce5cf;
}
}
</style>
</html>
"""
logger = logging.getLogger(__name__)
def get_os_release_variable(key):
"""
Return value for key from /etc/os-release
/etc/os-release is a shell snippet, so we use bash to parse it.
Returns empty string if key is not found.
"""
return subprocess.check_output([
'/bin/bash', '-c',
"source /etc/os-release && echo ${{{key}}}".format(key=key)
]).decode().strip()
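# Illustrative usage (editor's sketch, not part of the original script): on an
# Ubuntu host, /etc/os-release defines ID and VERSION_ID, so the calls below
# would return e.g. 'ubuntu' and '18.04'; keys that are absent yield ''.
def _example_get_os_release_variable():
    distro = get_os_release_variable('ID')             # e.g. 'ubuntu'
    version = get_os_release_variable('VERSION_ID')    # e.g. '18.04'
    missing = get_os_release_variable('NO_SUCH_KEY')   # '' when the key is not set
    return distro, version, missing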
# Copied into tljh/utils.py. Make sure the copies are exactly the same!
def run_subprocess(cmd, *args, **kwargs):
"""
Run given cmd with smart output behavior.
If command succeeds, print output to debug logging.
If it fails, print output to info logging.
In TLJH, this sends successful output to the installer log,
and failed output directly to the user's screen
"""
logger = logging.getLogger('tljh')
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs)
printable_command = ' '.join(cmd)
if proc.returncode != 0:
# Our process failed! Show output to the user
logger.error('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
logger.error(proc.stdout.decode())
raise subprocess.CalledProcessError(cmd=cmd, returncode=proc.returncode)
else:
# This goes into installer.log
logger.debug('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
# This produces multi line log output, unfortunately. Not sure how to fix.
# For now, prioritizing human readability over machine readability.
logger.debug(proc.stdout.decode())
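# Illustrative usage (editor's sketch, not part of the original script): a
# successful command is only logged at DEBUG, while a failing command logs its
# combined stdout/stderr at ERROR and then raises CalledProcessError.
def _example_run_subprocess():
    run_subprocess(['echo', 'hello'])       # output goes to the debug log
    try:
        run_subprocess(['/bin/false'])      # non-zero exit: logged at ERROR, then raises
    except subprocess.CalledProcessError:
        pass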
def validate_host():
"""
Make sure TLJH is installable in current host
"""
# Support only Ubuntu 18.04+
distro = get_os_release_variable('ID')
version = float(get_os_release_variable('VERSION_ID'))
if distro != 'ubuntu':
print('The Littlest JupyterHub currently supports Ubuntu Linux only')
sys.exit(1)
elif version < 18.04:
print('The Littlest JupyterHub requires Ubuntu 18.04 or higher')
sys.exit(1)
if sys.version_info < (3, 5):
print("bootstrap.py must be run with at least Python 3.5")
sys.exit(1)
if not (shutil.which('systemd') and shutil.which('systemctl')):
print("Systemd is required to run TLJH")
# Only fail running inside docker if systemd isn't present
if os.path.exists('/.dockerenv'):
print("Running inside a docker container without systemd isn't supported")
print("We recommend against running a production TLJH instance inside a docker container")
print("For local development, see http://tljh.jupyter.org/en/latest/contributing/dev-setup.html")
sys.exit(1)
class LoaderPageRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == "/logs":
with open("/opt/tljh/installer.log", "r") as log_file:
logs = log_file.read()
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write(logs.encode('utf-8'))
elif self.path == "/index.html":
self.path = "/var/run/index.html"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/favicon.ico":
self.path = "/var/run/favicon.ico"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/":
self.send_response(302)
self.send_header('Location','/index.html')
self.end_headers()
else:
SimpleHTTPRequestHandler.send_error(self, code=403)
def serve_forever(server):
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def main():
flags = sys.argv[1:]
temp_page_flag = "--show-progress-page"
# Check for flag in the argv list. This doesn't use argparse
# because it's the only argument that's meant for the bootstrap script.
# All the other flags will be passed to and parsed by the installer.
if temp_page_flag in flags:
with open("/var/run/index.html", "w+") as f:
f.write(html)
favicon_url="https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/share/jupyterhub/static/favicon.ico"
urllib.request.urlretrieve(favicon_url, "/var/run/favicon.ico")
# If the bootstrap is run to upgrade TLJH, then this will raise an "Address already in use" error
try:
loading_page_server = HTTPServer(("", 80), LoaderPageRequestHandler)
p = multiprocessing.Process(target=serve_forever, args=(loading_page_server,))
# Serves the loading page until TLJH builds
p.start()
# Remove the flag from the args list, since it was only relevant to this script.
flags.remove("--show-progress-page")
# Pass the server's pid as a flag to the installer
pid_flag = "--progress-page-server-pid"
flags.extend([pid_flag, str(p.pid)])
except OSError:
# Only serve the loading page when installing TLJH
pass
validate_host()
install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')
hub_prefix = os.path.join(install_prefix, 'hub')
# Set up logging to print to a file and to stderr
os.makedirs(install_prefix, exist_ok=True)
file_logger_path = os.path.join(install_prefix, 'installer.log')
file_logger = logging.FileHandler(file_logger_path)
# installer.log should be readable only by root
os.chmod(file_logger_path, 0o500)
file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
file_logger.setLevel(logging.DEBUG)
logger.addHandler(file_logger)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter('%(message)s'))
stderr_logger.setLevel(logging.INFO)
logger.addHandler(stderr_logger)
logger.setLevel(logging.DEBUG)
logger.info('Checking if TLJH is already installed...')
if os.path.exists(os.path.join(hub_prefix, 'bin', 'python3')):
logger.info('TLJH already installed, upgrading...')
initial_setup = False
else:
logger.info('Setting up hub environment')
initial_setup = True
# Install software-properties-common, so we can get add-apt-repository
# That helps us make sure the universe repository is enabled, since
# that's where the python3-pip package lives. In some very minimal base
# VM images, it looks like the universe repository is disabled by default,
# causing bootstrapping to fail.
run_subprocess(['apt-get', 'update', '--yes'])
run_subprocess(['apt-get', 'install', '--yes', 'software-properties-common'])
run_subprocess(['add-apt-repository', 'universe'])
run_subprocess(['apt-get', 'update', '--yes'])
run_subprocess(['apt-get', 'install', '--yes',
'python3',
'python3-venv',
'python3-pip',
'git'
])
logger.info('Installed python & virtual environment')
os.makedirs(hub_prefix, exist_ok=True)
run_subprocess(['python3', '-m', 'venv', hub_prefix])
logger.info('Set up hub virtual environment')
if initial_setup:
logger.info('Setting up TLJH installer...')
else:
logger.info('Upgrading TLJH installer...')
pip_flags = ['--upgrade']
if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':
pip_flags.append('--editable')
tljh_repo_path = os.environ.get(
'TLJH_BOOTSTRAP_PIP_SPEC',
'git+https://github.com/jupyterhub/the-littlest-jupyterhub.git'
)
# Upgrade pip
run_subprocess([
os.path.join(hub_prefix, 'bin', 'pip'),
'install',
'--upgrade',
'pip==20.0.*'
])
logger.info('Upgraded pip')
run_subprocess([
os.path.join(hub_prefix, 'bin', 'pip'),
'install'
] + pip_flags + [tljh_repo_path])
logger.info('Setup tljh package')
logger.info('Starting TLJH installer...')
os.execv(
os.path.join(hub_prefix, 'bin', 'python3'),
[
os.path.join(hub_prefix, 'bin', 'python3'),
'-m',
'tljh.installer',
] + flags
)
if __name__ == '__main__':
main()
|
utils.py
|
import asyncio
from asyncio import TimeoutError
import atexit
import click
from collections import deque, OrderedDict, UserDict
from concurrent.futures import ThreadPoolExecutor, CancelledError # noqa: F401
from contextlib import contextmanager, suppress
import functools
from hashlib import md5
import html
import json
import logging
import multiprocessing
import os
import re
import shutil
import socket
from time import sleep
import importlib
from importlib.util import cache_from_source
import inspect
import sys
import tempfile
import threading
import warnings
import weakref
import pkgutil
import base64
import tblib.pickling_support
import xml.etree.ElementTree
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa
format_bytes,
funcname,
format_time,
parse_bytes,
parse_timedelta,
)
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS, get_running_loop
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import required_packages, optional_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
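# Illustrative usage (editor's sketch): has_arg inspects positional argument
# names and follows __wrapped__ on decorated callables until it finds the name
# or runs out of wrappers.
def _example_has_arg():
    def f(x, y):
        return x + y

    assert has_arg(f, "x")
    assert not has_arg(f, "z")

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        return f(*args, **kwargs)

    assert has_arg(wrapped, "y")  # found by following __wrapped__ back to f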
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
# FIXME: this breaks if changed to async def...
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
"""Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with suppress(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = parse_timedelta(callback_timeout, "s")
# Tornado's PollIOLoop doesn't raise when used after being closed, so check it ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can safely share the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
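# Illustrative usage (editor's sketch): run_sync starts the IO loop in a
# background thread if needed, runs the coroutine to completion, and stops the
# loop again when this runner was the one that started it.
def _example_loop_runner():
    async def double(x):
        return 2 * x

    runner = LoopRunner()                 # fresh IOLoop, run in a background thread
    result = runner.run_sync(double, 21)  # -> 42
    assert result == 42
    assert not runner.is_started()        # run_sync stopped the loop it started
    return result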
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return key_split(x)
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
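# Illustrative usage (editor's sketch): starting from an offset that falls in
# the middle of a record, seek_delimiter advances the file position to just
# past the next delimiter so a subsequent read starts on a record boundary.
def _example_seek_delimiter():
    from io import BytesIO

    f = BytesIO(b"Alice, 100\nBob, 200\nCharlie, 300")
    f.seek(3)                           # somewhere inside the first record
    seek_delimiter(f, b"\n", blocksize=4)
    assert f.tell() == 11               # first byte after the first b"\n"
    assert f.read(8) == b"Bob, 200"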
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
The object to be converted. The following types are handled correctly:
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def is_writeable(frame):
"""
Check whether frame is writeable
Will return ``True`` if writeable, ``False`` if readonly, and
``None`` if undetermined.
"""
try:
return not memoryview(frame).readonly
except TypeError:
return None
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
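# Illustrative usage (editor's sketch): attach a DequeHandler to a logger to
# keep only the most recent records in memory, e.g. for exposing them in a
# dashboard or an error report.
def _example_deque_handler():
    handler = DequeHandler(n=3)
    log = logging.getLogger("example.deque")
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    for i in range(5):
        log.info("message %d", i)
    messages = [record.getMessage() for record in handler.deque]
    assert messages == ["message 2", "message 3", "message 4"]  # only the last 3 kept
    log.removeHandler(handler)
    return messages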
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
# TODO: Use tornado's AnyThreadEventLoopPolicy, instead of class below,
# once tornado > 6.0.3 is available.
if WINDOWS and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
# https://github.com/tornadoweb/tornado/issues/2608
BaseEventLoopPolicy = asyncio.WindowsSelectorEventLoopPolicy
else:
BaseEventLoopPolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(BaseEventLoopPolicy):
def get_event_loop(self):
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = set(
[
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
]
)
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
"""Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
def is_coroutine_function(f):
return asyncio.iscoroutinefunction(f) or gen.is_coroutine_function(f)
class Log(str):
""" A container for logs """
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
""" A container for multiple logs """
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d: dict
The keywords to convert
cls: callable
The callable that consumes these terms to check them for validity
cmd: string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
else:
raise ValueError(
"Module %s does not support keyword %s" % (typename(cmd), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
try:
_offload_executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="Dask-Offload"
)
except TypeError:
_offload_executor = ThreadPoolExecutor(max_workers=1)
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin")
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
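# Illustrative usage (editor's sketch): offload pushes a blocking call onto the
# single-threaded "Dask-Offload" executor so the event loop stays responsive.
def _example_offload():
    async def main():
        # sum(...) runs on the offload thread, not on the event loop thread
        return await offload(sum, range(10))

    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(main())  # -> 45
    finally:
        loop.close()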
def serialize_for_cli(data):
"""Serialize data into a string that can be passthrough cli
Parameters
----------
data: json-serializable object
The data to serialize
Returns
-------
serialized_data: str
The serialized data as a string
"""
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
def deserialize_for_cli(data):
"""De-serialize data into the original object
Parameters
----------
data: str
String serialized by serialize_for_cli()
Returns
-------
deserialized_data: obj
The de-serialized data
"""
return json.loads(base64.urlsafe_b64decode(data.encode()).decode())
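# Illustrative usage (editor's sketch): the two helpers round-trip any
# JSON-serializable configuration through a single CLI-safe token.
def _example_cli_serialization():
    config = {"scheduler-address": "tcp://10.0.0.1:8786", "retries": 3}
    token = serialize_for_cli(config)   # base64 text, safe to pass as one argv element
    assert deserialize_for_cli(token) == config
    return token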
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
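# Illustrative usage (editor's sketch): lookups refresh a key's recency, so the
# least recently *used* entry is the one evicted when the mapping is full.
def _example_lru():
    cache = LRU(maxsize=2)
    cache["a"] = 1
    cache["b"] = 2
    cache["a"]            # touch "a" so "b" becomes the eviction candidate
    cache["c"] = 3        # evicts "b"
    assert "b" not in cache and set(cache) == {"a", "c"}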
def clean_dashboard_address(addr, default_listen_ip=""):
"""
Examples
--------
>>> clean_dashboard_address(8787)
{'address': '', 'port': 8787}
>>> clean_dashboard_address(":8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("8787")
{'address': '', 'port': 8787}
>>> clean_dashboard_address("foo:8787")
{'address': 'foo', 'port': 8787}
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
return {"address": host, "port": port}
|
dispatcher.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run on the dispatcher. Builds each benchmark with each fuzzing
configuration, spawns a runner VM for each benchmark-fuzzer combo, and then
records coverage data received from the runner VMs."""
import datetime
import multiprocessing
import os
import sys
import threading
import time
from typing import List
from common import experiment_path as exp_path
from common import experiment_utils
from common import logs
from common import yaml_utils
from database import models
from database import utils as db_utils
from experiment.build import builder
from experiment.measurer import measure_manager
from experiment import reporter
from experiment import scheduler
from experiment import stop_experiment
LOOP_WAIT_SECONDS = 5 * 60
# TODO(metzman): Convert more uses of os.path.join to exp_path.path.
def _get_config_file_path():
"""Return config file path."""
return exp_path.path(
experiment_utils.get_internal_experiment_config_relative_path())
def create_work_subdirs(subdirs: List[str]):
"""Create |subdirs| in work directory."""
for subdir in subdirs:
os.mkdir(os.path.join(experiment_utils.get_work_dir(), subdir))
def _initialize_experiment_in_db(experiment_config: dict):
"""Initializes |experiment| in the database by creating the experiment
entity."""
with db_utils.session_scope() as session:
experiment_exists = session.query(models.Experiment).filter(
models.Experiment.name == experiment_config['experiment']).first()
if experiment_exists:
raise Exception('Experiment already exists in database.')
db_utils.add_all([
db_utils.get_or_create(
models.Experiment,
name=experiment_config['experiment'],
git_hash=experiment_config['git_hash'],
private=experiment_config.get('private', True),
experiment_filestore=experiment_config['experiment_filestore'],
description=experiment_config['description']),
])
def _record_experiment_time_ended(experiment_name: str):
"""Record |experiment| end time in the database."""
with db_utils.session_scope() as session:
experiment = session.query(models.Experiment).filter(
models.Experiment.name == experiment_name).one()
experiment.time_ended = datetime.datetime.utcnow()
db_utils.add_all([experiment])
def _initialize_trials_in_db(trials: List[models.Trial]):
"""Initializes entities for each trial in the experiment."""
# TODO(metzman): Consider doing this without sqlalchemy. This can get
# slow with SQLalchemy (it's much worse with add_all).
db_utils.bulk_save(trials)
class Experiment: # pylint: disable=too-many-instance-attributes
"""Class representing an experiment."""
def __init__(self, experiment_config_filepath: str):
self.config = yaml_utils.read(experiment_config_filepath)
self.benchmarks = self.config['benchmarks']
self.fuzzers = self.config['fuzzers']
self.num_trials = self.config['trials']
self.experiment_name = self.config['experiment']
self.git_hash = self.config['git_hash']
self.preemptible = self.config.get('preemptible_runners')
def build_images_for_trials(fuzzers: List[str],
benchmarks: List[str],
num_trials: int,
preemptible: bool,
concurrent_builds=None) -> List[models.Trial]:
"""Builds the images needed to run |experiment| and returns a list of trials
that can be run for experiment. This is the number of trials specified in
experiment times each pair of fuzzer+benchmark that builds successfully."""
# This call will raise an exception if the images can't be built which will
# halt the experiment.
builder.build_base_images()
# Only build fuzzers for benchmarks whose measurers built successfully.
if concurrent_builds is None:
benchmarks = builder.build_all_measurers(benchmarks)
build_successes = builder.build_all_fuzzer_benchmarks(
fuzzers, benchmarks)
else:
benchmarks = builder.build_all_measurers(benchmarks, concurrent_builds)
build_successes = builder.build_all_fuzzer_benchmarks(
fuzzers, benchmarks, concurrent_builds)
experiment_name = experiment_utils.get_experiment_name()
trials = []
for fuzzer, benchmark in build_successes:
fuzzer_benchmark_trials = [
models.Trial(fuzzer=fuzzer,
experiment=experiment_name,
benchmark=benchmark,
preemptible=preemptible) for _ in range(num_trials)
]
trials.extend(fuzzer_benchmark_trials)
return trials
def dispatcher_main():
"""Do the experiment and report results."""
logs.info('Starting experiment.')
# Set this here because we get failures if we do it in measurer for some
# reason.
multiprocessing.set_start_method('spawn')
db_utils.initialize()
if experiment_utils.is_local_experiment():
models.Base.metadata.create_all(db_utils.engine)
experiment_config_file_path = _get_config_file_path()
experiment = Experiment(experiment_config_file_path)
_initialize_experiment_in_db(experiment.config)
trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
experiment.num_trials,
experiment.preemptible,
experiment.config['concurrent_builds'])
_initialize_trials_in_db(trials)
create_work_subdirs(['experiment-folders', 'measurement-folders'])
# Start measurer and scheduler in separate threads/processes.
scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
args=(experiment.config,))
scheduler_loop_thread.start()
measurer_main_process = multiprocessing.Process(
target=measure_manager.measure_main, args=(experiment.config,))
measurer_main_process.start()
is_complete = False
while True:
time.sleep(LOOP_WAIT_SECONDS)
if not scheduler_loop_thread.is_alive():
is_complete = not measurer_main_process.is_alive()
# Generate periodic output reports.
reporter.output_report(experiment.config,
in_progress=not is_complete,
coverage_report=is_complete)
if is_complete:
# Experiment is complete, bail out.
break
scheduler_loop_thread.join()
measurer_main_process.join()
_record_experiment_time_ended(experiment.experiment_name)
logs.info('Experiment ended.')
def main():
"""Do the experiment and report results."""
logs.initialize(default_extras={
'component': 'dispatcher',
})
try:
dispatcher_main()
except Exception as error:
logs.error('Error conducting experiment.')
raise error
if experiment_utils.is_local_experiment():
return 0
experiment_config_file_path = _get_config_file_path()
if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
experiment_config_file_path):
return 0
return 1
if __name__ == '__main__':
sys.exit(main())
|
socket_server.py
|
import socket
import threading
import time
import json
server_ip = '0.0.0.0'
server_port = 8091
is_accepted = False
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((server_ip, server_port))
server.listen(10)
# print('[*] listening on ' + server_ip + ':' + str(server_port))
def handle_client(client, port):
from database import db
import json
request = client.recv(1024).decode()
this_data = json.loads(request)
    print('date from:', this_data["date_from"], 'date to:', this_data["date_to"])
this_db = db()
report = this_db.reportConsumption(this_data['date_from'],this_data['date_to'])
    print('report is:', report)
this_db.closedb()
json_report = json.dumps(report,sort_keys=True)
    print(json_report)
    # Sockets send bytes, so encode the JSON string before sending.
    client.send(json_report.encode())
client.close()
# time.sleep(2)
while True:
client, port = server.accept()
is_accepted = True
    # print('connection accepted!')
    handler_client = threading.Thread(
        target=handle_client, args=(client, port))
    # start() runs the handler in its own thread; run() would execute it in the
    # accept loop's thread and block new connections.
    handler_client.start()
|
parallel_runner.py
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
# Make subprocesses for the envs
self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
env_fn = env_REGISTRY[self.args.env]
self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
for worker_conn in self.worker_conns]
for p in self.ps:
p.daemon = True
p.start()
self.parent_conns[0].send(("get_env_info", None))
self.env_info = self.parent_conns[0].recv()
self.episode_limit = self.env_info["episode_limit"]
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -100000
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
self.scheme = scheme
self.groups = groups
self.preprocess = preprocess
def get_env_info(self):
return self.env_info
def save_replay(self):
pass
def close_env(self):
for parent_conn in self.parent_conns:
parent_conn.send(("close", None))
def reset(self):
self.batch = self.new_batch()
# Reset the envs
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
self.batch.update(pre_transition_data, ts=0)
self.t = 0
self.env_steps_this_run = 0
def run(self, test_mode=False):
self.reset()
all_terminated = False
episode_returns = [0 for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
self.mac.init_hidden(batch_size=self.batch_size)
terminated = [False for _ in range(self.batch_size)]
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
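        # Main rollout loop: select actions for every env that has not yet
        # terminated, step those envs, then record the post-transition data
        # (reward, terminated) for this timestep and the pre-transition data
        # (state, avail_actions, obs) needed to pick the next actions.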
while True:
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch for each un-terminated env
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
cpu_actions = actions.to("cpu").numpy()
# Update the actions taken
actions_chosen = {
"actions": actions.unsqueeze(1)
}
self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if idx in envs_not_terminated: # We produced actions for this env
if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data = {
"reward": [],
"terminated": []
}
# Data for the next step we will insert in order to select an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Receive data back for each unterminated env
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
data = parent_conn.recv()
# Remaining data for this current timestep
post_transition_data["reward"].append((data["reward"],))
episode_returns[idx] += data["reward"]
episode_lengths[idx] += 1
if not test_mode:
self.env_steps_this_run += 1
env_terminated = False
if data["terminated"]:
final_env_infos.append(data["info"])
if data["terminated"] and not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data["terminated"].append((env_terminated,))
# Data for the next timestep needed to select an action
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Add post_transiton data into the batch
self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
if not test_mode:
self.t_env += self.env_steps_this_run
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats",None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
infos = [cur_stats] + final_env_infos
cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
cur_returns.extend(episode_returns)
n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
if test_mode and (len(self.test_returns) == n_test_runs):
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
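# Worker process loop. Commands arrive over the Pipe as (cmd, data) tuples:
# "step" (data = actions), "reset", "close", "get_env_info" and "get_stats";
# replies are sent back over the same connection.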
def env_worker(remote, env_fn):
# Make environment
env = env_fn.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
actions = data
# Take a step in the environment
reward, terminated, env_info = env.step(actions)
# Return the observations, avail_actions and state to make the next action
state = env.get_state()
avail_actions = env.get_avail_actions()
obs = env.get_obs()
remote.send({
# Data for the next timestep needed to pick an action
"state": state,
"avail_actions": avail_actions,
"obs": obs,
# Rest of the data for the current timestep
"reward": reward,
"terminated": terminated,
"info": env_info
})
elif cmd == "reset":
env.reset()
remote.send({
"state": env.get_state(),
"avail_actions": env.get_avail_actions(),
"obs": env.get_obs()
})
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_env_info":
remote.send(env.get_env_info())
elif cmd == "get_stats":
remote.send(env.get_stats())
else:
raise NotImplementedError
class CloudpickleWrapper():
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
|
state_test.py
|
import numpy as np
import pytest
from procgen import ProcgenGym3Env
from .env import ENV_NAMES
import gym3
import multiprocessing as mp
NUM_STEPS = 10000
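# These tests check that Procgen environments are deterministic and that
# get_state/set_state reproduce trajectories exactly: rollouts gathered in
# fresh subprocesses with the same seed and actions must be identical, and
# restoring a saved state mid-rollout must match the reference rollout.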
def gather_rollouts(
env_kwargs, actions, state=None, get_state=False, set_state_every_step=False
):
env = ProcgenGym3Env(**env_kwargs)
if state is not None:
env.callmethod("set_state", state)
result = [dict(ob=env.observe(), info=env.get_info())]
if get_state:
result[-1]["state"] = env.callmethod("get_state")
if set_state_every_step:
env.callmethod("set_state", result[-1]["state"])
for act in actions:
env.act(act)
result.append(dict(ob=env.observe(), info=env.get_info()))
if get_state:
result[-1]["state"] = env.callmethod("get_state")
if set_state_every_step:
env.callmethod("set_state", result[-1]["state"])
return result
def fn_wrapper(fn, result_queue, **kwargs):
result = fn(**kwargs)
result_queue.put(result)
def run_in_subproc(fn, **kwargs):
ctx = mp.get_context("spawn")
result_queue = ctx.Queue()
p = ctx.Process(
target=fn_wrapper, kwargs=dict(fn=fn, result_queue=result_queue, **kwargs)
)
p.start()
result = result_queue.get()
p.join()
return result
def assert_rollouts_identical(a_rollout, b_rollout):
assert len(a_rollout) == len(b_rollout)
for a, b in zip(a_rollout, b_rollout):
assert a["info"] == b["info"]
a_rew, a_ob, a_first = a["ob"]
b_rew, b_ob, b_first = b["ob"]
assert np.array_equal(a_rew, b_rew)
assert np.array_equal(a_first, b_first)
assert sorted(a_ob.keys()) == sorted(b_ob.keys())
for k in sorted(a_ob.keys()):
assert np.array_equal(a_ob[k], b_ob[k])
if "state" in a and "state" in b:
assert a["state"] == b["state"]
@pytest.mark.skip(reason="slow")
@pytest.mark.parametrize("env_name", ENV_NAMES)
def test_state(env_name):
run_state_test(env_name)
def run_state_test(env_name):
env_kwargs = dict(num=2, env_name=env_name, rand_seed=0)
env = ProcgenGym3Env(**env_kwargs)
rng = np.random.RandomState(0)
actions = [
gym3.types_np.sample(env.ac_space, bshape=(env.num,), rng=rng)
for _ in range(NUM_STEPS)
]
ref_rollouts = run_in_subproc(
gather_rollouts, env_kwargs=env_kwargs, actions=actions
)
assert len(ref_rollouts) == NUM_STEPS + 1
# run the same thing a second time
basic_rollouts = run_in_subproc(
gather_rollouts, env_kwargs=env_kwargs, actions=actions
)
assert_rollouts_identical(ref_rollouts, basic_rollouts)
# run but save states
state_rollouts = run_in_subproc(
gather_rollouts, env_kwargs=env_kwargs, actions=actions, get_state=True
)
assert_rollouts_identical(ref_rollouts, state_rollouts)
# make sure states are the same
state_rollouts_2 = run_in_subproc(
gather_rollouts, env_kwargs=env_kwargs, actions=actions, get_state=True
)
assert_rollouts_identical(ref_rollouts, state_rollouts_2)
assert_rollouts_identical(state_rollouts, state_rollouts_2)
# save and restore at each timestep
state_rollouts_3 = run_in_subproc(
gather_rollouts,
env_kwargs=env_kwargs,
actions=actions,
get_state=True,
set_state_every_step=True,
)
assert_rollouts_identical(ref_rollouts, state_rollouts_3)
assert_rollouts_identical(state_rollouts, state_rollouts_3)
# restore a point in the middle of the rollout and make sure that the remainder of the data looks the same
offset = NUM_STEPS // 2
state_restore_rollouts = run_in_subproc(
gather_rollouts,
env_kwargs={**env_kwargs, "rand_seed": 1},
actions=actions[offset:],
state=state_rollouts[offset]["state"],
get_state=True,
)
assert_rollouts_identical(ref_rollouts[offset:], state_restore_rollouts)
assert_rollouts_identical(state_rollouts[offset:], state_restore_rollouts)
|
face_det_sfd.py
|
import os
import cv2
import glob
import time
import face_alignment
from multiprocessing import Pool, Process, Queue
def run(gpu, files):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device='cuda')
print('gpu={},n_files={}'.format(gpu, len(files)))
tic = time.time()
count = 0
for (img_name, savename) in files:
I = cv2.imread(img_name)
points_list = fa.get_landmarks(I)
with open(savename, 'w') as f:
if(points_list is not None):
for points in points_list:
for (x, y) in points:
f.write('({}, {})\t'.format(x, y))
f.write('\n')
count += 1
if(count % 1000 == 0):
print('dst={},eta={}'.format(savename, (time.time()-tic)/(count) * (len(files) - count) / 3600.0))
if(__name__ == '__main__'):
with open('imgs.txt', 'r') as f:
folders = [line.strip() for line in f.readlines()]
data = []
for folder in folders:
files = glob.glob(folder + "/*")
for f in files:
data.append(f)
data = [(name, name.replace('.jpg', '.txt')) for name in data]
for (_, dst) in data:
dir, _ = os.path.split(dst)
if(not os.path.exists(dir)):
os.makedirs(dir)
processes = []
n_p = 8
#gpus = ['0']
bs = len(data) // n_p
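    # Split the work into n_p roughly equal chunks, one per process; the last
    # process takes any remainder. All workers share GPU '0' here because
    # CUDA_VISIBLE_DEVICES is set to '0' for each of them.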
for i in range(n_p):
if(i == n_p - 1):
bs = len(data)
p = Process(target=run, args=('0',data[:bs],))
data = data[bs:]
p.start()
processes.append(p)
assert(len(data) == 0)
for p in processes:
p.join()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
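# NOTE: this script targets Python 2 (print statements, long literals and
# httplib); it will not run unmodified under Python 3.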
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
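        # The 80-byte block header is the first 160 hex chars of 'data'; the
        # 4-byte nonce occupies bytes 76..80, i.e. hex chars 152..160, which is
        # the region replaced here before resubmitting the work.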
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2668
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
msfs_landing_inspector.py
|
from flask import Flask, render_template, jsonify, request
from SimConnect import *
from threading import Thread
simconnect_dict = {}
def flask_thread_func(threadname):
global simconnect_dict
app = Flask(__name__)
@app.route('/_stuff', methods = ['GET'])
def stuff():
global simconnect_dict
return jsonify(simconnect_dict)
@app.route('/')
def index():
return render_template('body.html')
app.run(host='0.0.0.0', debug=False, use_reloader=False)
def simconnect_thread_func(threadname):
global simconnect_dict
# Init SimConnect
sm = SimConnect()
aq = AircraftRequests(sm, _time = 0)
# Init variables
airborne = False
v_speed_list = []
g_force_list = []
plane_alt_above_ground_list = []
sim_on_ground_list = [1,1,1,1,1,1,1,1]
run_app = 1
simconnect_dict["G_FORCE"] = 0
simconnect_dict["VERTICAL_SPEED"] = 0
simconnect_dict["SIM_ON_GROUND"] = 0
simconnect_dict["G_FORCE_LANDING"] = "N/A"
simconnect_dict["VERTICAL_SPEED_LANDING"] = "N/A"
simconnect_dict["G_FORCE_LANDING_LIST"] = "N/A"
simconnect_dict["VERTICAL_SPEED_LANDING_LIST"] = "N/A"
simconnect_dict["SIM_ON_GROUND_LIST"] = "N/A"
simconnect_dict["AIRBORNE"] = 0
simconnect_dict["G_FORCE_LIST"] = g_force_list
simconnect_dict["V_SPEED_LIST"] = v_speed_list
simconnect_dict["PLANE_ALT_ABOVE_GROUND_LIST"] = plane_alt_above_ground_list
simconnect_dict["LANDING_RATING"] = "N/A"
simconnect_dict["LANDING_COUNTER"] = 0
# Create empty labels for charts
labels_list = []
for i in range(150):
labels_list.append("")
simconnect_dict["LABELS"] = labels_list
# Run Simconnect Calculations
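    # Landing detection uses a rolling window of recent SIM_ON_GROUND samples:
    # the plane is marked airborne once every sample in the window is 0, and a
    # landing is registered when, while airborne, the window sums to 30 (i.e.
    # nearly every recent sample reads on-ground). The v-speed, g-force and
    # altitude lists keep roughly the last 150 samples for the charts.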
while run_app == 1:
if airborne == True and sum(sim_on_ground_list) == 30:
simconnect_dict["G_FORCE_LANDING"] = max(g_force_list)
max_gforce_index = g_force_list.index(max(g_force_list))
simconnect_dict["VERTICAL_SPEED_LANDING"] = v_speed_list[max_gforce_index]
simconnect_dict["G_FORCE_LANDING_LIST"] = g_force_list[::-1]*1
v_speed_list_neg = [elem * (-1) for elem in v_speed_list]
simconnect_dict["VERTICAL_SPEED_LANDING_LIST"] = v_speed_list_neg[::-1]*1
simconnect_dict["PLANE_ALT_ABOVE_GROUND_LIST"] = plane_alt_above_ground_list[::-1]*1
simconnect_dict["LANDING_COUNTER"] = simconnect_dict["LANDING_COUNTER"] + 1
# Landing Rating Based on G-Forces
if simconnect_dict["G_FORCE_LANDING"] < 1.25:
simconnect_dict["LANDING_RATING"] = "Smooth landing"
elif simconnect_dict["G_FORCE_LANDING"] < 1.5:
simconnect_dict["LANDING_RATING"] = "Acceptable landing"
elif simconnect_dict["G_FORCE_LANDING"] < 1.75:
simconnect_dict["LANDING_RATING"] = "Poor landing"
elif simconnect_dict["G_FORCE_LANDING"] < 2:
simconnect_dict["LANDING_RATING"] = "Hard landing"
elif simconnect_dict["G_FORCE_LANDING"] <= 2.5:
simconnect_dict["LANDING_RATING"] = "Very hard landing"
else:
simconnect_dict["LANDING_RATING"] = "Structural damage to plane"
airborne = False
if sum(sim_on_ground_list) == 0 and airborne == False:
airborne = True
# Get Current Data
simconnect_dict["G_FORCE"] = round(aq.get("G_FORCE"), 2)
simconnect_dict["VERTICAL_SPEED"] = round(aq.get("VERTICAL_SPEED"))
simconnect_dict["SIM_ON_GROUND"] = aq.get("SIM_ON_GROUND")
simconnect_dict["AIRBORNE"] = airborne
simconnect_dict["G_FORCE_LIST"] = g_force_list
# Make lists
v_speed_list.insert(0, simconnect_dict["VERTICAL_SPEED"])
if len(v_speed_list) > 151:
v_speed_list.pop()
g_force_list.insert(0, simconnect_dict["G_FORCE"])
if len(g_force_list) > 151:
g_force_list.pop()
sim_on_ground_list.insert(0, simconnect_dict["SIM_ON_GROUND"])
if len(sim_on_ground_list) > 31:
sim_on_ground_list.pop()
plane_alt_above_ground_list.insert(0, (round(aq.get("PLANE_ALT_ABOVE_GROUND"), 1)))
if len(plane_alt_above_ground_list) > 151:
plane_alt_above_ground_list.pop()
#print(f'SIMCONNECT: {simconnect_dict}')
if __name__ == "__main__":
thread1 = Thread(target = simconnect_thread_func, args=('Thread-1', ))
thread2 = Thread(target = flask_thread_func, args=('Thread-2', ))
thread1.start()
thread2.start()
|
main.py
|
from PyQt5.QtWidgets import QApplication, QPushButton, QVBoxLayout, QScrollArea, QGroupBox, QWidget, QFormLayout, \
QLabel, QLineEdit, QHBoxLayout, QBoxLayout, QSizePolicy,QStackedWidget ,QVBoxLayout,QGridLayout,QCheckBox,QMessageBox
from PyQt5 import QtCore,Qt
from PyQt5.QtGui import QFont
from PyQt5.QtMultimedia import QSound
from AddReminder import AddReminderWindow
from FileUtils import FileUtil
import threading
import time as Time
from datetime import datetime
import sys
import sip
class Window(QWidget):
reminderevent = QtCore.pyqtSignal(str)
def __init__(self):
super().__init__()
self.w = None
self.title ='Reminder'
self.left = 500
self.top = 200
self.width_ = 600
self.height = 800
self.setStyleSheet("background-color: black;\n" "padding:0px;\n" "spacing:0px;\n")
self.InitWindow()
self.NoBoderStyleSheet = ("border :0px;\n")
layout = self.LoadLayout()
addReminder = QPushButton('Add')
addReminder.setStyleSheet("QPushButton{background-color: #52057b;\n color: White;\n border: 1px solid #52057b;\n border-radius:25px;\n padding:10px;\nspacing :10px; }"
"QPushButton::hover{background-color : #31034a;}\n")
addReminder.setMinimumHeight(50)
addReminder.setFont(QFont('Open Sans',15))
addReminder.clicked.connect(self.showAddReminderWindow)
layout.addWidget(addReminder)
self.setLayout(layout)
self.reminderevent.connect(self.showAlertWindow)
t = threading.Thread(target = self.showAlert)
        t.daemon = True
t.start()
def mousePressEvent(self, event):
if event.button() == 1:
self.isMouseButtonDown = True
self.oldPos = event.globalPos()
def mouseReleaseEvent(self, event):
self.isMouseButtonDown = False
def mouseMoveEvent(self, event):
if self.isMouseButtonDown == True:
delta = (event.globalPos() - self.oldPos)
#print(delta)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.oldPos = event.globalPos()
def showAlertWindow(self,amsg):
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Pending Task")
msg.setInformativeText(amsg)
msg.setWindowTitle("Task Alert")
msg.exec_()
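    # Reminder records appear to be semicolon-separated strings of the form
    # "title;date;start_time;end_time" (date as %d-%m-%Y, time as %H:%M), as
    # inferred from the parsing in showAlert and reminderUI below; the exact
    # format is defined by FileUtil, which is not shown here.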
    def showAlert(self):
        # Runs on a background thread: re-read the reminders file every minute
        # and raise an alert for any reminder matching the current date/time.
        file = FileUtil()
        while True:
            today = datetime.today()
            time = datetime.now()
            systime = time.strftime("%H:%M")
            print(systime)
            sysdate = today.strftime("%d-%m-%Y")
            reminders, count = FileUtil.loadRemindersFromFile(file)
            for reminder in reminders:
                r = reminder.split(';')
                if r[1] == sysdate and r[2] == systime:
                    self.reminderevent.emit(r[0])
            Time.sleep(60)
def showAddReminderWindow(self, checked):
if self.w is None:
self.w = AddReminderWindow()
#self.w.setWindowModality(Qt.WindowModal)
self.w.show()
self.w.Update.connect(self.UpdateReminders)
#self.UpdateReminders()
def Close(self):
self.close()
self.w.close()
def DoneWithReminder(self,button):
index = self.reminderContainer.indexOf(button.parent())
print(index)
        file = FileUtil()
        file.deleteReminder(index)
button.parent().deleteLater()
def upperFrame(self):
frame = QHBoxLayout()
frame.setContentsMargins(20,0, 0, 0)
Title = QLabel(self.title)
Title.setFont(QFont('Open Sans',15))
Title.setStyleSheet('color:white;\n')
frame.addWidget(Title)
frame.setSpacing(0)
Close = QPushButton('')
Close.setMinimumHeight(45)
Close.setMinimumWidth(45)
Close.setStyleSheet("QPushButton{background-color: black;\n color: White;\n border: 1px solid black;\n border-radius:25px;\n padding:10px;\n image: url(X.png);\n}"
"QPushButton::hover{background-color : #31034a;}\n")
Close.clicked.connect(lambda: self.close())
Minimize = QPushButton('')
Minimize.setStyleSheet("QPushButton{background-color: black;\n color: White;\n border: 1px solid black;\n border-radius:25px;\n padding:10px;\n image: url(Min.png);\n}"
"QPushButton::hover{background-color : #31034a;}\n")
Minimize.clicked.connect(lambda: self.showMinimized())
Minimize.setMinimumHeight(45)
Minimize.setMinimumWidth(45)
frame.addStretch()
frame.addWidget(Minimize)
frame.addWidget(Close)
tempBox = QGroupBox()
tempBox.setMaximumHeight(45)
tempBox.setStyleSheet("background-color: black;\n" "padding:0px;\n"+self.NoBoderStyleSheet)
tempBox.setLayout(frame)
return tempBox
def LoadLayout(self):
ScrollBoxStyleSheet = ("""
QScrollBar:vertical {
border: 0px solid #999999;
background:white;
width:5px;
margin: 0px 0px 0px 0px;
}
QScrollBar::handle:vertical {
background-color: gray ;
min-height: 0px;
border-radius:2px;
}
QScrollBar::add-line:vertical {
background-color: white;
height: 0px;
subcontrol-position: bottom;
subcontrol-origin: margin;
}
QScrollBar::sub-line:vertical {
background-color:white;
height: 0 px;
subcontrol-position: top;
subcontrol-origin: margin;
}
QScrollArea
{
border :0px;
}""")
root = QStackedWidget(self)
reminders ,count= FileUtil.loadRemindersFromFile(self)
self.reminderContainer = QVBoxLayout()
scroll = QScrollArea()
scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
scroll.setStyleSheet(ScrollBoxStyleSheet)
reminderGroupBox = QGroupBox()
self.reminderContainer.addStretch()
reminderGroupBox.setLayout(self.reminderContainer)
scroll.setWidget(reminderGroupBox)
if(count != 0):
for reminder in reminders:
self.reminderContainer.addWidget(self.reminderUI(reminder))
root.setStyleSheet("padding:0px;\n" + self.NoBoderStyleSheet)
#root.addWidget(reminderGroupBox)
#rootBox = QGroupBox()
#rootBox.setLayout(scroll)
root.addWidget(scroll)
templayout = QGridLayout()
templayout.setContentsMargins(0, 0, 0, 10)
templayout.setSpacing(0)
templayout.addWidget(self.upperFrame())
templayout.addWidget(root)
return templayout
def UpdateReminders(self):
while self.reminderContainer.count():
child = self.reminderContainer.takeAt(0)
if child.widget():
child.widget().deleteLater()
self.reminderContainer.addStretch()
reminders ,count= FileUtil.loadRemindersFromFile(self)
for reminder in reminders:
self.reminderContainer.addWidget(self.reminderUI(reminder))
def reminderUI(self, reminder):
reminderList = reminder.split(';',4)
reminderTitle = QLabel(reminderList[0])
reminderDate = QLabel(reminderList[1])
reminderStartTime = QLabel(reminderList[2])
reminderEndTime = QLabel(reminderList[3])
reminderTitle.setFont(QFont('Open Sans',15))
reminderDate.setFont(QFont('Open Sans',15))
reminderStartTime.setFont(QFont('Open Sans',15))
reminderEndTime.setFont(QFont('Open Sans',15))
reminderBox = QVBoxLayout()
reminderBox.addWidget(reminderTitle)
reminderBox.addWidget(reminderDate)
reminderBox.addWidget(reminderStartTime)
reminderBox.addWidget(reminderEndTime)
reminderBox2 = QHBoxLayout()
doneButton = QPushButton('Done')
doneButton.setStyleSheet(
"background-color: White;\n" "border: 1px solid white;\n" "border-radius:25px;\n" "padding:10px;\n""color: Black;\n" )
doneButton.setMinimumHeight(50)
doneButton.setMaximumWidth(100)
doneButton.clicked.connect(lambda: self.DoneWithReminder(doneButton))
temp = QGroupBox()
temp.setStyleSheet(self.NoBoderStyleSheet)
temp.setLayout(reminderBox)
reminderBox2.addWidget(temp)
reminderBox2.addWidget(doneButton)
temp2 = QGroupBox()
temp2.setMaximumHeight(150)
temp2.setStyleSheet('border-radius:25px;\n'"background-color: #a40eda;\n""border: 1px solid #a40eda;\n""color: White;\n")
temp2.setLayout(reminderBox2)
return temp2
def InitWindow(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width_, self.height)
flags = QtCore.Qt.WindowFlags(QtCore.Qt.FramelessWindowHint)
self.setWindowFlags(flags)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Window()
ex.show()
sys.exit(app.exec())
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import signal
import threading
import time
import traceback
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.summary import summary_ops as contrib_summary
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
    iterations_per_loop: Tensor. The number of eval steps running in the TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
  # Estimator.evaluate() increments the eval step by 1 per run, so here we add
  # the remaining (iterations_per_loop - 1).
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(
collections.namedtuple('TPUEstimatorSpec', [
'mode',
'predictions',
'loss',
'train_op',
'eval_metrics',
'export_outputs',
'scaffold_fn',
'host_call'
])):
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.
  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function and returns a list of Tensors. `host_call` currently
  works for train() and evaluate(). The Tensors returned by the function are
executed on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.' % self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._session_cancel_timer = None
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _log_error(self, session, error):
"""Log an infeed or outfeed error.
This logs a short error message immediately, and schedules a timer to
emit the full stack trace and error message after a short period of time.
If the main session has terminated by the time the timer triggers, we
assume the real source of the error was from the main session and avoid
emitting a stack trace for the infeed.
Args:
      session: `tf.Session`, session to be terminated.
      error: the exception that triggered logging.
"""
logging.warning(
'\n\n'
'Error occurred during infeed/outfeed. This may be due to a compile '
'error in the main session. Waiting for a short time for the main '
'session to come back.\n\n%s', error)
self._feed_error = traceback.format_exc()
# If we've already encountered a feed error, don't schedule another
# cancellation op.
if self._session_cancel_timer:
return
def _cancel_session():
# Close the session to avoid the main thread from hanging. If input
# pipeline triggers any error, the infeed thread dies but the main thread
# for TPU computation waits for the infeed enqueue forever. Close the
# Session to cancel the main thread Session.run execution.
#
# We sleep for a few seconds before closing to give some time
# for the TPU compilation error, if any, propagating, from TPU to CPU
# host. Compilation errors should be reported by the main thread so that
# the program can be interrupted and users can take action. Due to a race
# condition, the infeed thread might see an error first. Closing the
# session here immediately would result in a session cancellation
# exception in the main thread, instead of the expected compile error.
# User code that depends on having the proper exception type will
# therefore be confused.
time.sleep(5)
# If the main session is still running, the infeed/outfeed errors are
# legitimate, and should be logged.
if not self._finished and self._feed_error:
logging.error('Feed error: %s', self._feed_error)
logging.error('Closing session. A RuntimeError should follow.')
session.close()
self._session_cancel_timer = threading.Thread(target=_cancel_session)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
try:
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
try:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
logging.info('Start infeed thread controller')
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
logging.info('Start outfeed thread controller')
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
# Wait for the cancellation timer to complete before continuing.
if self._session_cancel_timer:
self._session_cancel_timer.join()
self._session_cancel_timer = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
if self._session_cancel_timer:
logging.warning('Feed error occurred; waiting for message.')
self._session_cancel_timer.join()
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
logging.info('Stop output thread controller')
self._outfeed_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for the prediction model. But it makes the
    # TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
      # batch 0: images, labels, stop = 0 (user provided)
      # batch 1: images, labels, stop = 0 (user provided)
      # ...
      # batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediciton, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
inputs = _Inputs.from_input_fn(input_fn())
if inputs.is_dataset:
raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
inputs = _Inputs.from_input_fn(input_fn())
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
# TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
# _TPUContext.tpu_ordinal_function. We should either introduce another
# abstraction or a different helper method.
def _tpu_ordinal_function_impl(shard_index_in_host):
# We put both enqueue/dequeue op at tpu.core(0) in each replica.
replica = ctx.device_assignment.lookup_replicas(
host_id, (0, 0, 0))[shard_index_in_host]
return ctx.device_assignment.tpu_ordinal(replica=replica)
if ctx.model_parallelism_enabled:
tpu_ordinal_function = _tpu_ordinal_function_impl
else:
tpu_ordinal_function = None
def enqueue_ops_fn():
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flattening/unflattening is also handled by `_InputPipeline`. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to the underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
The TPU infeed/outfeed library expects a flattened tensor list. So, `features`
and `labels` need to be flattened before infeed enqueue, and their structure
needs to be recorded in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
# Verify the structure is the same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have the same structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
'The number of flattened tensors does not match the expected number. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
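# Illustrative round trip (editor's sketch, not part of the original code;
# `x`, `y`, and `labels` below are hypothetical tensors):
#
#   recorder = _InputPipeline.InputsStructureRecorder()
#   features = {'x': x, 'y': y}
#   recorder.validate_and_record_structure(features, labels)
#   flat = recorder.flatten_features_and_labels(features, labels)
#   # `flat` is [features['x'], features['y'], labels]; after infeed dequeue,
#   # the same recorder restores the original structure:
#   inputs = recorder.unflatten_features_and_labels(flat)
#   features, labels = inputs.features_and_labels()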
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_TPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is constructed, the body function, which invokes the
# `enqueue_fn` passed in, is called to build the graph, so the input_fn
# structure is recorded at that point.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
with ops.device(tpu.core(0)):
values = self._infeed_queue.generate_dequeue_op()
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke the input pipeline for each core and place it on the
# corresponding host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
self._batch_axis, host_device, host_id))
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is dtypes and shapes. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
# Perform some sanity checks to log user-friendly information. We should
# error out to give users a better error message. But, if
# _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
# user code, so we only log a warning.
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/programmers_guide/datasets for '
'instructions.')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels):
return self._call_model_fn(features, labels)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides, the input should be taken from the TPU infeed rather
than from the input pipeline (input_fn) directly. To fit the TPU
loop-and-replicate pattern, the original train computation is reformed into
the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, TPUEstimatorSpec):
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, TPUEstimatorSpec) and
estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
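# Editor's note: `train_step` is later passed to `training_loop.repeat` (see
# `_train_on_tpu_system` below), which threads the returned loss through as the
# loop-carried value; that is why the incoming `loss` argument is ignored here.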
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides, the input and output are slightly different. The
input, features and labels, should be taken from the TPU infeed rather than
from the input pipeline (input_fn) directly. Output is managed in two stages.
First, the model outputs, as the result of the evaluation computation (usually
model logits), should be transferred from the TPU system to the CPU. Then, all
model outputs are concatenated first on CPU and sent to the metric_fn for
metrics computation. To fit the TPU evaluation pattern, the original eval
computation is reformed into the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
# TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(tpu_estimator_spec.predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _call_model_fn(self, features, labels, is_export_mode=True):
"""Calls the model_fn with required parameters."""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
params[_BATCH_SIZE_KEY] = batch_size_for_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
if (self._ctx.is_running_on_cpu(is_export_mode) and
isinstance(estimator_spec, TPUEstimatorSpec)):
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, TPUEstimatorSpec):
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
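# Illustrative host_call sketch (editor's addition; `gs_t`, `loss_t`, and the
# summary directory are hypothetical; tensors are reshaped to keep a leading
# batch-like dimension, as required by the outfeed check below):
#
#   def host_fn(global_step, loss):
#     # Runs on the CPU host; receives tensors concatenated across replicas.
#     with tf.contrib.summary.create_file_writer('/tmp/host_call').as_default():
#       with tf.contrib.summary.always_record_summaries():
#         tf.contrib.summary.scalar('loss', loss[0], step=global_step[0])
#       return tf.contrib.summary.all_summary_ops()
#
#   spec = TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op,
#       host_call=(host_fn, [tf.reshape(gs_t, [1]), tf.reshape(loss_t, [1])]))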
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = util.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn, which
is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return []
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
with ops.device(tpu_device_placement_fn(i)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on single host TPU system. So,
# place all ops on tpu host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
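# Editor's summary of the flow above: inside the TPU computation, `record()`
# captures the (fn, tensors) pairs and `create_enqueue_op()` emits a single
# outfeed_enqueue_tuple per step; on the CPU side, `create_tpu_hostcall()`
# dequeues one tuple per replica, concatenates each tensor along dimension 0,
# and finally calls the recorded host fns with the concatenated tensors.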
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Count examples during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
examples_per_sec = self._batch_size * elapsed_steps / elapsed_time
if self._summary_writer is not None:
example_summary = Summary(value=[
Summary.Value(tag='examples_sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(example_summary, global_step)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify the
global batch size in the constructor, and then get the batch size for each
shard in `input_fn` and `model_fn` via `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with small datasets, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
3. Each batch returned by `Dataset`'s iterator must have the *same static*
shape. This means two things:
- batch_size cannot be `None`
- the final batch must be padded by the user to a full batch.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda images: {'image': images})
def pad(tensor, missing_count):
# Pads out the batch dimension to the complete batch_size.
rank = len(tensor.shape)
assert rank > 0
padding = tf.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = tf.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def pad_batch_if_incomplete(batch_features):
# Pads out the batch dimension for all features.
real_batch_size = tf.shape(batch_features["image"])[0]
missing_count = tf.constant(batch_size, tf.int32) - real_batch_size
padded_features = {
key: pad(tensor, missing_count)
for key, tensor in batch_features.iteritems()
}
padding_mask = tf.concat(
[
tf.zeros((real_batch_size, 1), dtype=tf.int32),
tf.ones((missing_count, 1), dtype=tf.int32)
],
axis=0)
padding_mask.set_shape((batch_size, 1))
padded_features["is_padding"] = padding_mask
return padded_features
dataset = dataset.map(pad_batch_if_incomplete)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
Exporting `SavedModel` support on TPU is not yet implemented. So,
`export_savedmodel` is executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyperparameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False, batch_axis is ignored.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but exist in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _TPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (not config.tpu_config.per_host_input_for_training and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPUConfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _TPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = util.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn():
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# if features is callable, it is the input_fn passed by
# TPUEstimator._call_input_fn. In that case, if mode == PREDICT, this is
# the .predict API, not the export_savedmodel API.
is_export_mode = not callable(features)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(features, labels)
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
ExamplesPerSecondHook(ctx.global_batch_size),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': training.get_global_step()
},
every_n_secs=30)
] + input_hooks
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_hooks=hooks,
train_op=control_flow_ops.group(*update_ops),
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops to have an update_op and calls them one by
# one. The real metric update_ops are invoked in a separate thread. So,
# here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret['eval_metrics'].items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the elements (via a generator) to the call site. So,
# the outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
stopping_signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
stopping_signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
host_ops),
] + input_hooks
# TODO(b/73813593): Delete this logging once the bug is resolved.
logging.info(
'If the Tensors in TPUEstimatorSpec.predictions dict are large, '
'you might observe the TPU program getting stuck (b/73813593). '
'Consider using small Tensors in the predictions dict to verify '
'the issue and report on the bug.')
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS],
name='loop')
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS],
name=b'loop')
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device.
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
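# Illustrative model_fn excerpt (editor's addition) showing how the
# CrossReplicaSum op that this check looks for usually enters the graph:
#
#   optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
#   if use_tpu:
#     optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())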
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can only be captured once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
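# Illustrative scaffold_fn sketch (editor's addition): the captured value is a
# zero-argument callable returning a tf.train.Scaffold, e.g.
#
#   def scaffold_fn():
#     # Must not return None; _get_scaffold raises in that case.
#     return tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=5))
#
#   spec = TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op,
#                           scaffold_fn=scaffold_fn)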
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_lib._DatasetInitializerHook(iterator)
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
# TODO(xiejw): Extend this to support final partial batch.
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary with `features`, `labels`, and `signals` as three
distinct keys. This provides a better structure, which eases the process of
decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = 0.0
STOPPING_SIGNAL = 1.0
def __init__(self, stop, batch_size):
self._stop = stop
self._batch_size = batch_size
def as_dict(self):
shape = [self._batch_size, 1]
dtype = dtypes.float32
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
return {'stopping': stopping}
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
return scalar_stopping_signal >= _StopSignals.STOPPING_SIGNAL
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals)]
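# A minimal sketch (not part of the original module) of the flatten/unflatten
# round trip performed by _SignalsHelper; plain Python values stand in for
# tensors and the key names are hypothetical. Both directions sort the keys,
# so zip() realigns the values with the right names:
#
#   signals = {'stopping': 0.0, 'padding_mask': 1.0}
#   helper = _SignalsHelper(signals)
#   tensor_list = _SignalsHelper.as_tensor_list(signals)
#   assert helper.unflatten(tensor_list) == signals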
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
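# Usage sketch (hypothetical caller, not part of this module): call
# keep_alive() once before starting a long-running bot loop, so an external
# uptime pinger hitting port 8080 keeps the process awake.
#
#   from keep_alive import keep_alive
#   keep_alive()     # starts the Flask server in a background thread
#   run_my_bot()     # hypothetical blocking bot loop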
|
mp-pack-bz2.py
|
#!/usr/bin/env python
# Test of parallelizing bz2 and packing via multiprocessing
# Pushing record-lists through multiprocessing.Queue dies at 10 MiB/s
# Packing in the main thread seems to die at around 70 MiB/s, which is weird
# since split-lines can run at >140 MiB/s -- I guess packing + pickling is
# expensive.
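# Example invocation (hypothetical file names): read text on stdin, write the
# bz2-compressed packed records to stdout, with a 1 MiB read buffer and 4
# worker processes:
#
#   cat corpus.txt | python mp-pack-bz2.py 1048576 4 > corpus.packed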
import sys
import bz2
from multiprocessing import Process, Queue
from zs._zs import pack_data_records
QUIT = None
def worker(in_queue, out_queue, alloc_hint):
while True:
job = in_queue.get()
if job is QUIT:
break
idx, data = job
records = data.split("\n")
result = bz2.compress(pack_data_records(records, alloc_hint))
out_queue.put((idx, result))
def writer(in_queue):
waiting_for = 0
pending = {}
while True:
job = in_queue.get()
if job is QUIT:
assert not pending
break
idx, data = job
pending[idx] = data
while waiting_for in pending:
sys.stdout.write(pending[waiting_for])
del pending[waiting_for]
waiting_for += 1
def main(progname, args):
if len(args) != 2:
sys.exit("Usage: %s BUFSIZE NUM-THREADS" % (progname,))
bufsize = int(args[0])
num_threads = int(args[1])
worker_queue = Queue(num_threads * 2)
writer_queue = Queue(num_threads * 2)
writer_thread = Process(target=writer, args=(writer_queue,))
writer_thread.start()
worker_threads = []
worker_args = (worker_queue, writer_queue, bufsize * 2)
for i in xrange(num_threads):
worker_threads.append(Process(target=worker, args=worker_args))
worker_threads[-1].start()
i = 0
partial_line = ""
while True:
data = sys.stdin.read(bufsize)
if not data:
break
data = partial_line + data
data, partial_line = data.rsplit("\n", 1)
worker_queue.put((i, data))
i += 1
# Shut down
sys.stderr.write("Shutting down\n")
for i in xrange(num_threads):
worker_queue.put(QUIT)
for worker_thread in worker_threads:
worker_thread.join()
# All the worker threads have exited, so all their work has been enqueued
# to the writer thread. So our QUIT will end up after all actual work in
# the queue.
writer_queue.put(QUIT)
writer_thread.join()
sys.stderr.write("Shutdown successful\n")
if __name__ == "__main__":
main(sys.argv[0], sys.argv[1:])
|
_sframe_loader.py
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import numpy as _np
import turicreate as _tc
import mxnet as _mx
from six.moves.queue import Queue as _Queue
from threading import Thread as _Thread
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from ._detection import yolo_boxes_to_yolo_map as _yolo_boxes_to_yolo_map
_TMP_COL_RANDOM_ORDER = '_random_order'
def _is_rectangle_annotation(ann):
return 'type' not in ann or ann['type'] == 'rectangle'
def _is_valid_annotation(ann):
if not isinstance(ann, dict):
return False
if not _is_rectangle_annotation(ann):
# Not necessarily valid, but we bypass stricter checks (we simply do
# not care about non-rectangle types)
return True
return ('coordinates' in ann and
isinstance(ann['coordinates'], dict) and
set(ann['coordinates'].keys()) == {'x', 'y', 'width', 'height'} and
ann['coordinates']['width'] > 0 and
ann['coordinates']['height'] > 0 and
'label' in ann)
def _is_valid_annotations_list(anns):
return all([_is_valid_annotation(ann) for ann in anns])
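# For reference, a minimal annotation that passes _is_valid_annotation
# (values are illustrative only):
#
#   {'label': 'dog',
#    'type': 'rectangle',   # optional; a missing 'type' is treated as rectangle
#    'coordinates': {'x': 100, 'y': 120, 'width': 80, 'height': 60}}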
# Encapsulates an SFrame, iterating over each row and returning an
# (image, label, index) tuple.
class _SFrameDataSource:
def __init__(self, sframe, feature_column, annotations_column,
load_labels=True, shuffle=True, samples=None):
self.annotations_column = annotations_column
self.feature_column = feature_column
self.load_labels = load_labels
self.shuffle = shuffle
self.num_samples = samples
self.cur_sample = 0
# Make shallow copy, so that temporary columns do not change input
self.sframe = sframe.copy()
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _next(self):
if self.cur_sample == self.num_samples:
raise StopIteration
# If we're about to begin a new epoch, shuffle the SFrame if requested.
row_index = self.cur_sample % len(self.sframe)
if row_index == 0 and self.cur_sample > 0 and self.shuffle:
self.sframe[_TMP_COL_RANDOM_ORDER] = _np.random.uniform(size=len(self.sframe))
self.sframe = self.sframe.sort(_TMP_COL_RANDOM_ORDER)
self.cur_sample += 1
# Copy the image data for this row into a NumPy array.
row = self.sframe[row_index]
image = row[self.feature_column].pixel_data
# Copy the annotated bounding boxes for this image, if requested.
if self.load_labels:
label = row[self.annotations_column]
if label is None:
label = []
elif isinstance(label, dict):
label = [label]
else:
label = None
return image, label, row_index
def reset(self):
self.cur_sample = 0
# A wrapper around _SFrameDataSource that uses a dedicated worker thread for
# performing SFrame operations.
class _SFrameAsyncDataSource:
def __init__(self, sframe, feature_column, annotations_column,
load_labels=True, shuffle=True, samples=None, buffer_size=256):
# This buffer_reset_queue will be used to communicate to the background
# thread. Each "message" is itself a _Queue that the background thread
# will use to communicate with us.
buffer_reset_queue = _Queue()
def worker():
data_source = _SFrameDataSource(
sframe, feature_column, annotations_column,
load_labels=load_labels, shuffle=shuffle, samples=samples)
while True:
buffer = buffer_reset_queue.get()
if buffer is None:
break # No more work to do, exit this thread.
for row in data_source:
buffer.put(row)
# Check if we've been reset (or told to exit).
if not buffer_reset_queue.empty():
break
# Always end each output buffer with None to signal completion.
buffer.put(None)
data_source.reset()
self.worker_thread = _Thread(target=worker)
self.worker_thread.daemon = True
self.worker_thread.start()
self.buffer_reset_queue = buffer_reset_queue
self.buffer_size = buffer_size
# Create the initial buffer and send it to the background thread, so
# that it begins sending us annotated images.
self.buffer = _Queue(self.buffer_size)
self.buffer_reset_queue.put(self.buffer)
def __del__(self):
# Tell the background thread to shut down.
self.buffer_reset_queue.put(None)
# Drain self.buffer to ensure that the background thread isn't stuck
# waiting to put something into it (and therefore never receives the
# shutdown request).
if self.buffer is not None:
while self.buffer.get() is not None:
pass
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _next(self):
# Guard against repeated calls after we've finished.
if self.buffer is None:
raise StopIteration
result = self.buffer.get()
if result is None:
# Any future attempt to get from self.buffer will block forever,
# since the background thread won't put anything else into it.
self.buffer = None
raise StopIteration
return result
def reset(self):
# Send a new buffer to the background thread, telling it to reset.
buffer = _Queue(self.buffer_size)
self.buffer_reset_queue.put(buffer)
# Drain self.buffer to ensure that the background thread isn't stuck
# waiting to put something into it (and therefore never receives the
# new buffer).
if self.buffer is not None:
while self.buffer.get() is not None:
pass
self.buffer = buffer
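# Minimal usage sketch (assuming `sf` is an SFrame with 'image' and 'annotations'
# columns): both data sources expose the same iterator protocol and yield
# (image, label, row_index) tuples. Passing samples=len(sf) stops after one epoch.
#
#   source = _SFrameAsyncDataSource(sf, 'image', 'annotations', samples=len(sf))
#   for image, label, row_index in source:
#       pass  # e.g. copy into an MXNet batch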
class SFrameDetectionIter(_mx.io.DataIter):
def __init__(self,
sframe,
batch_size,
input_shape,
output_shape,
anchors,
feature_column,
annotations_column,
class_to_index,
aug_params={},
loader_type='augmented',
load_labels=True,
shuffle=True,
io_thread_buffer_size=0,
epochs=None,
iterations=None):
# Some checks (these errors are not well-reported by the threads)
if sframe[feature_column].dtype != _tc.Image:
raise ValueError('Feature column must be of type Image')
num_classes = len(class_to_index)
num_anchors = len(anchors)
# Load images with random perturbations
if loader_type == 'augmented':
augs = _mx.image.CreateDetAugmenter(data_shape=(3,) + tuple(input_shape),
resize=aug_params['aug_resize'],
rand_crop=aug_params['aug_rand_crop'],
rand_pad=aug_params['aug_rand_pad'],
rand_mirror=aug_params['aug_horizontal_flip'],
rand_gray=aug_params['aug_rand_gray'],
mean=_np.zeros(3), std=_np.ones(3),
brightness=aug_params['aug_brightness'],
contrast=aug_params['aug_contrast'],
saturation=aug_params['aug_saturation'],
hue=aug_params['aug_hue'],
pca_noise=aug_params['aug_pca_noise'],
inter_method=aug_params['aug_inter_method'],
min_object_covered=aug_params['aug_min_object_covered'],
aspect_ratio_range=(1/aug_params['aug_aspect_ratio'], aug_params['aug_aspect_ratio']),
pad_val=(128, 128, 128),
min_eject_coverage=aug_params['aug_min_eject_coverage'],
area_range=aug_params['aug_area_range'])
elif loader_type == 'stretched':
augs = _mx.image.CreateDetAugmenter(data_shape=(3,) + tuple(input_shape),
rand_crop=0.0, rand_pad=0.0, rand_mirror=False,
mean=_np.zeros(3), std=_np.ones(3), brightness=0.0,
contrast=0.0, saturation=0.0, hue=0, pca_noise=0.0,
inter_method=1)
else:
raise ValueError('Unknown loader_type: {}'.format(loader_type))
self.augmentations = augs
self.batch_size = batch_size
self.input_shape = input_shape
self.output_shape = output_shape
self.num_classes = num_classes
self.anchors = anchors
self.class_to_index = class_to_index
self.cur_iteration = 0
self.num_epochs = epochs
self.num_iterations = iterations
if load_labels:
is_annotations_list = sframe[annotations_column].dtype == list
# Check that all annotations are valid
if is_annotations_list:
valids = sframe[annotations_column].apply(_is_valid_annotations_list)
else:
valids = sframe[annotations_column].apply(_is_valid_annotation)
# Deal with Nones, which are valid (pure negatives)
valids = valids.fillna(1)
if valids.nnz() != len(sframe):
# Fetch invalid row ids
invalid_ids = _tc.SFrame({'valid': valids}).add_row_number()[valids == 0]['id']
count = len(invalid_ids)
num_examples = 5
s = ""
for row_id in invalid_ids[:num_examples]:
# Find which annotations were invalid in the list
s += "\n\nRow ID {}:".format(row_id)
anns = sframe[row_id][annotations_column]
if not isinstance(anns, list):
anns = [anns]
for ann in anns:
if not _is_valid_annotation(ann):
s += "\n" + str(ann)
if count > num_examples:
s += "\n\n... ({} row(s) omitted)".format(count - num_examples)
# There were invalid rows
raise _ToolkitError("Invalid object annotations discovered.\n\n"
"A valid annotation is a dictionary that defines 'label' and 'coordinates',\n"
"the latter being a dictionary that defines 'x', 'y', 'width', and 'height'.\n"
"The following row(s) did not conform to this format:" + s)
# Compute the number of times we'll read a row from the SFrame.
sample_limits = []
if iterations is not None:
sample_limits.append(iterations * batch_size)
if epochs is not None:
sample_limits.append(epochs * len(sframe))
samples = min(sample_limits) if len(sample_limits) > 0 else None
if io_thread_buffer_size > 0:
# Delegate SFrame operations to a background thread, leaving this
# thread to Python-based work of copying to MxNet and scheduling
# augmentation work in the MXNet backend.
self.data_source = _SFrameAsyncDataSource(
sframe, feature_column, annotations_column,
load_labels=load_labels, shuffle=shuffle, samples=samples,
buffer_size=io_thread_buffer_size * batch_size)
else:
self.data_source = _SFrameDataSource(
sframe, feature_column, annotations_column,
load_labels=load_labels, shuffle=shuffle, samples=samples)
self._provide_data = [
_mx.io.DataDesc(name='image',
shape=(batch_size, 3) + tuple(input_shape),
layout='NCHW')
]
output_size = (num_classes + 5) * num_anchors
self._provide_label = [
_mx.io.DataDesc(name='label_map',
shape=(batch_size, output_size) + tuple(output_shape),
layout='NCHW')
]
def __iter__(self):
return self
def reset(self):
self.data_source.reset()
self.cur_iteration = 0
def __next__(self):
return self._next()
def next(self):
return self._next()
@property
def provide_data(self):
return self._provide_data
@property
def provide_label(self):
return self._provide_label
def _next(self):
images = []
ymaps = []
indices = []
orig_shapes = []
bboxes = []
classes = []
if self.cur_iteration == self.num_iterations:
raise StopIteration
# Since we pre-screened the annotations, at this point we just want to
# check that it's the right type (rectangle) and the class is included
def is_valid(ann):
return _is_rectangle_annotation(ann) and ann['label'] in self.class_to_index
pad = None
for b in range(self.batch_size):
try:
row = next(self.data_source)
except StopIteration:
if b == 0:
# Don't return an empty batch.
raise
else:
# It's time to finish, so we need to pad the batch
pad = self.batch_size - b
for p in range(pad):
images.append(_mx.nd.zeros(images[0].shape))
ymaps.append(_mx.nd.zeros(ymaps[0].shape))
indices.append(0)
orig_shapes.append([0, 0, 0])
break
raw_image, label, cur_sample = row
orig_image = _mx.nd.array(raw_image)
image = orig_image
oshape = orig_image.shape
if label is not None:
# Unchanged boxes, for evaluation
raw_bbox = _np.array([[
ann['coordinates']['x'],
ann['coordinates']['y'],
ann['coordinates']['width'],
ann['coordinates']['height'],
] for ann in label if is_valid(ann)]).reshape(-1, 4)
# MXNet-formatted boxes for input to data augmentation
bbox = _np.array([[
self.class_to_index[ann['label']],
(ann['coordinates']['x'] - ann['coordinates']['width'] / 2) / orig_image.shape[1],
(ann['coordinates']['y'] - ann['coordinates']['height'] / 2) / orig_image.shape[0],
(ann['coordinates']['x'] + ann['coordinates']['width'] / 2) / orig_image.shape[1],
(ann['coordinates']['y'] + ann['coordinates']['height'] / 2) / orig_image.shape[0],
] for ann in label if is_valid(ann)]).reshape(-1, 5)
else:
raw_bbox = _np.zeros((0, 4))
bbox = _np.zeros((0, 5))
for aug in self.augmentations:
try:
image, bbox = aug(image, bbox)
except ValueError:
# It is extremely rare, but mxnet can fail for some reason.
# If this happens, remove all boxes.
bbox = _np.zeros((0, 5))
image01 = image / 255.0
np_ymap = _yolo_boxes_to_yolo_map(bbox,
input_shape=self.input_shape,
output_shape=self.output_shape,
num_classes=self.num_classes,
anchors=self.anchors)
ymap = _mx.nd.array(np_ymap)
images.append(_mx.nd.transpose(image01, [2, 0, 1]))
ymaps.append(ymap)
indices.append(cur_sample)
orig_shapes.append(oshape)
bboxes.append(raw_bbox)
classes.append(bbox[:, 0].astype(_np.int32))
b_images = _mx.nd.stack(*images)
b_ymaps = _mx.nd.stack(*ymaps)
b_indices = _mx.nd.array(indices)
b_orig_shapes = _mx.nd.array(orig_shapes)
batch = _mx.io.DataBatch([b_images], [b_ymaps, b_indices, b_orig_shapes], pad=pad)
# Attach additional information
batch.raw_bboxes = bboxes
batch.raw_classes = classes
batch.iteration = self.cur_iteration
self.cur_iteration += 1
return batch
|
main.py
|
#!/usr/bin/env python
from telegram.ext import Updater, CommandHandler, MessageHandler
from telegram.ext.filters import Filters
import logging
import json
import jsonpickle
import re
import threading
import schedule
import time
from os import path
USERDATA_FILE = "userdata.json"
GROUPS_FILE = ".groups"
GENERAL_LOL_REGEX = re.compile(r"(.*lol.*)|(.*almao.*)|(.*arofl.*)", re.IGNORECASE)
LOL_SCORES = {
r".*(^|\s+|\.|,|!|\?|\:|\;)lol($|\s+|\.|,|!|\?|\:|\;)": 1,
r".*(^|\s+|\.|,|!|\?|\:|\;)rwalol($|\s+|\.|,|!|\?|\:|\;)": 2,
r".*(^|\s+|\.|,|!|\?|\:|\;)walol($|\s+|\.|,|!|\?|\:|\;)": 3,
r".*(^|\s+|\.|,|!|\?|\:|\;)alol($|\s+|\.|,|!|\?|\:|\;)": 4,
r".*(^|\s+|\.|,|!|\?|\:|\;)rwalmao($|\s+|\.|,|!|\?|\:|\;)": 5,
r".*(^|\s+|\.|,|!|\?|\:|\;)walmao($|\s+|\.|,|!|\?|\:|\;)": 6,
r".*(^|\s+|\.|,|!|\?|\:|\;)almao($|\s+|\.|,|!|\?|\:|\;)": 7,
r".*(^|\s+|\.|,|!|\?|\:|\;)rwarofl($|\s+|\.|,|!|\?|\:|\;)": 8,
r".*(^|\s+|\.|,|!|\?|\:|\;)warofl($|\s+|\.|,|!|\?|\:|\;)": 9,
r".*(^|\s+|\.|,|!|\?|\:|\;)arofl($|\s+|\.|,|!|\?|\:|\;)": 10,
}
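# Illustrative scores derived from the table above (the first matching pattern
# wins, in the order the patterns are listed): "lol" -> 1, "that was walmao" -> 6,
# "arofl" -> 10. A sketch of how a score is resolved, mirroring the loop in
# on_lol_message below:
#
#   score = next((s for rx, s in LOL_SCORES.items() if re.match(rx, "that was walmao")), None)
#   assert score == 6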
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
class UserData(object):
def __init__(self, current_score: int, full_name: str, banned: bool):
self.current_score = current_score
self.full_name = full_name
self.banned = banned
dump_thread_stop = threading.Event()
def run_schedule():
while not dump_thread_stop.is_set():
schedule.run_pending()
time.sleep(1)
def dump_dictionary(dictionary: dict, file_name: str):
with open(file_name, "w") as fp:
fp.write(jsonpickle.encode(dictionary))
def update_dictionary(dictionary: dict, file_name: str) -> dict:
if path.exists(file_name):
with open(file_name, "r") as fp:
return jsonpickle.decode(fp.read())
# print(dictionary["1234"].current_score)
else:
dump_dictionary(dictionary, file_name)
return dictionary
# id_to_userdata = {"1234": UserData(4, "cameron reikes", False)}
id_to_userdata = {}
allowed_groups = []
with open(GROUPS_FILE, "r") as groups_file:
allowed_groups = groups_file.read().split("\n")
schedule.every(5).minutes.do(dump_dictionary, id_to_userdata, USERDATA_FILE)
dump_thread = threading.Thread(target=run_schedule)
dump_thread.start()
id_to_userdata = update_dictionary(id_to_userdata, USERDATA_FILE)
with open(".token", "r") as tokenfile:
updater = Updater(token=tokenfile.read().replace("\n", ""), use_context=True)
dispatcher = updater.dispatcher
def start(update, context):
context.bot.send_message(
chat_id=update.message.chat_id, text="If I see one illegal alol... 👀"
)
def on_lol_message(update, context):
logging.info("lol detected")
# update.message.reply_text("This is a lol")
if update.message.from_user == update.message.reply_to_message.from_user:
update.message.reply_text(
"You really thought 🤦♂️🤦♂️🤦♂️ bruhhhh..... bitchass meatbody. You want a ban?"
)
logging.info(
"User f{update.message.from_user.full_name} with id f{update.message.from_user.id} tried to get alols by replying to own message"
)
return
# id_to_userdata is keyed by stringified user ids (see below), so compare string keys here
sender_id = str(update.message.from_user.id)
replied_to_id = str(update.message.reply_to_message.from_user.id)
if (sender_id in id_to_userdata and id_to_userdata[sender_id].banned) or (
replied_to_id in id_to_userdata and id_to_userdata[replied_to_id].banned
):
logging.info(
f"User with ID {update.message.from_user.id} was banned, cannot do anything"
)
return
if update.message.chat.id < 0 and str(update.message.chat.id) not in allowed_groups:
logging.info(f"Message from unknown group {update.message.chat.id}")
update.message.reply_text("My daddy says I shouldn't talk to strangers 🤨")
updater.bot.leave_chat(update.message.chat.id)
return
lol_score = None
message_text = update.message.text.lower()
for lol_regex in LOL_SCORES.keys():
if re.match(lol_regex, message_text):
lol_score = LOL_SCORES[lol_regex]
break
if lol_score is None:
logging.error(f"No lol regex matched for: {update.message.text}!")
return
user = update.message.reply_to_message.from_user
user_id = str(user.id)
if user_id in id_to_userdata:
id_to_userdata[user_id].current_score += lol_score
else:
id_to_userdata[user_id] = UserData(lol_score, user.full_name, False)
logging.info(
f"User {user.full_name} gained {lol_score} points from user {update.message.from_user.full_name} with id f{update.message.from_user.id}!"
)
def get_key(key):
return int(key)
def get_scores(update, context):
global id_to_userdata
full_message = f"-- Current lol scores --\n"
sorted_userdata = sorted(
id_to_userdata.values(), key=lambda x: x.current_score, reverse=True
)
for user in sorted_userdata:
banned_str = "banned" if user.banned else "not banned"
full_message += f"{user.full_name}: {user.current_score}, {banned_str}\n"
context.bot.send_message(chat_id=update.message.chat_id, text=full_message)
def get_group_id(update, context):
update.message.reply_text(f"Group ID: {update.message.chat.id}")
"""
start - start counting lols
getgroupid - get the current group's id
getscores - get the list of current scores
"""
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("getgroupid", get_group_id))
dispatcher.add_handler(CommandHandler("getscores", get_scores))
lol_handler = MessageHandler(
filters=(Filters.reply & Filters.regex(GENERAL_LOL_REGEX)), callback=on_lol_message
)
dispatcher.add_handler(lol_handler)
updater.start_polling()
logging.info("Polling...")
updater.idle()
dump_thread_stop.set()
dump_thread.join()
dump_dictionary(id_to_userdata, USERDATA_FILE)
|
cache_azure.py
|
from __future__ import annotations
import collections
import functools
import pickle
import sys
from abc import ABC, abstractmethod
from concurrent.futures.thread import ThreadPoolExecutor
from typing import Callable, Dict, Union, Any, List, Sequence, Optional
import json
import logging
import re
import threading
from azure.storage.table import TableService, TableBatch, Entity
from azure.storage.blob import BlockBlobService
import pandas as pd
import numpy as np
from .cache import PersistentKeyValueCache, PeriodicUpdateHook
_log = logging.getLogger(__name__)
class Serialiser(ABC):
"""
Abstraction for mechanisms to serialise values which do not fit the table storage data model,
see https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-the-table-service-data-model
"""
@abstractmethod
def serialise(self, value) -> str:
pass
@abstractmethod
def deSerialise(self, value: str):
pass
class NumpyArrayJsonSerialiser(Serialiser):
"""
Serialises a numpy array as json string of list representation of array
"""
def serialise(self, value: np.ndarray) -> str:
return json.dumps(value.tolist())
def deSerialise(self, value: str):
return np.array(json.loads(value))
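# Round-trip sketch (illustrative): serialise an array to a JSON string that fits
# the table storage data model, then recover it.
#
#   serialiser = NumpyArrayJsonSerialiser()
#   s = serialiser.serialise(np.array([[1, 2], [3, 4]]))   # '[[1, 2], [3, 4]]'
#   arr = serialiser.deSerialise(s)                        # back to an ndarray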
class PropertyLoader(ABC):
"""
Abstraction of a customised loader for an entity property
"""
@abstractmethod
def loadPropertyValue(self, entity: Entity):
pass
@abstractmethod
def writePropertyValue(self, entity: Entity):
pass
@abstractmethod
def loadPropertyValueToDataFrameColumn(self, df: pd.DataFrame):
pass
class SerialisedPropertyLoader(PropertyLoader):
"""
PropertyLoader to serialise and de-serialise values. Useful if the type of the values is not aligned with the table storage data model,
see https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-the-table-service-data-model
"""
def __init__(self, propertyName: str, serialiser: Serialiser):
self.serialiser = serialiser
self.propertyName = propertyName
def loadPropertyValue(self, entity: Entity):
entity[self.propertyName] = self.serialiser.deSerialise(entity[self.propertyName])
def writePropertyValue(self, entity: Entity):
entity[self.propertyName] = self.serialiser.serialise(entity[self.propertyName])
def loadPropertyValueToDataFrameColumn(self, df: pd.DataFrame):
if self.propertyName in df.columns:
df.loc[:, self.propertyName] = [self.serialiser.deSerialise(value) for value in df[self.propertyName]]
class AzureTableBlobBackend(ABC):
"""
Abstraction of a blob backend, which allows for convenient setting and getting of values stored in blob storage via a
reference to the value
"""
@abstractmethod
def getValueFromReference(self, valueIdentifier: str):
pass
@abstractmethod
def getValueReference(self, partitionKey: str, rowKey: str, valueName: str, blobNamePrefix: str = None) -> str:
pass
@abstractmethod
def setValueForReference(self, valueIdentifier: str, value):
pass
class BlobPerKeyAzureTableBlobBackend(AzureTableBlobBackend, ABC):
"""
Backend stores serialised values as /tableName/partitionKey/rowKey/valueName.<fileExtension>
or /tableName/rowKey/valueName.<fileExtension>, if partitionKey equals tableName
"""
def __init__(self, blockBlobService: BlockBlobService, containerName: str):
"""
:param blockBlobService: https://docs.microsoft.com/en-us/python/api/azure-storage-blob/azure.storage.blob.blockblobservice.blockblobservice?view=azure-python-previous
"""
self.blockBlobService = blockBlobService
self.containerName = containerName
self.containerList = [container.name for container in blockBlobService.list_containers()]
if containerName not in self.containerList:
self.blockBlobService.create_container(containerName)
self.containerList.append(containerName)
@property
@abstractmethod
def fileExtension(self):
pass
@abstractmethod
def _getBlobValue(self, containerName, blobName):
pass
@abstractmethod
def _writeValueToBlob(self, containerName, blobName, value):
pass
def getValueFromReference(self, valueIdentifier: str):
containerName = self._getContainerNameFromIdentifier(valueIdentifier)
blobName = self._getBlobNameFromIdentifier(valueIdentifier)
return self._getBlobValue(containerName, blobName)
def getValueReference(self, partitionKey: str, rowKey: str, valueName: str, blobNamePrefix: str = None) -> str:
blobName = self._getBlobNameFromKeys(partitionKey, rowKey, valueName, blobPrefix=blobNamePrefix)
return self.blockBlobService.make_blob_url(self.containerName, blobName)
def setValueForReference(self, valueIdentifier: str, value):
containerName = self._getContainerNameFromIdentifier(valueIdentifier)
blobName = self._getBlobNameFromIdentifier(valueIdentifier)
self._writeValueToBlob(containerName, blobName, value)
def _getBlobNameFromIdentifier(self, valueIdentifier: str):
return (valueIdentifier.partition(f"{self.blockBlobService.primary_endpoint}/")[2]).partition("/")[2]
def _getContainerNameFromIdentifier(self, valueIdentifier: str):
return (valueIdentifier.partition(f"{self.blockBlobService.primary_endpoint}/")[2]).partition("/")[0]
def _getBlobNameFromKeys(self, partitionKey: str, rowKey: str, valueName: str, blobPrefix: str = None):
identifierList = [blobPrefix, partitionKey] if blobPrefix is not None and blobPrefix != partitionKey else [partitionKey]
identifierList.extend([rowKey, valueName])
return "/".join(identifierList) + self.fileExtension
class TextDumpAzureTableBlobBackend(BlobPerKeyAzureTableBlobBackend):
"""
Backend stores values as txt files in the structure /tableName/partitionKey/rowKey/valueName
"""
@property
def fileExtension(self):
return ""
def _getBlobValue(self, containerName, blobName):
return self.blockBlobService.get_blob_to_text(containerName, blobName).content
def _writeValueToBlob(self, containerName, blobName, value):
self.blockBlobService.create_blob_from_text(containerName, blobName, value)
class JsonAzureTableBlobBackend(BlobPerKeyAzureTableBlobBackend):
"""
Backend stores values as json files in the structure /tableName/partitionKey/rowKey/valueName.json
"""
@property
def fileExtension(self):
return ".json"
def _getBlobValue(self, containerName, blobName):
encodedValue = self.blockBlobService.get_blob_to_bytes(containerName, blobName).content
return self._decodeBytesToValue(encodedValue)
def _writeValueToBlob(self, containerName, blobName, value):
encodedValue = self._encodeValueToBytes(value)
self.blockBlobService.create_blob_from_bytes(containerName, blobName, encodedValue)
@staticmethod
def _encodeValueToBytes(value):
return str.encode(json.dumps(value))
@staticmethod
def _decodeBytesToValue(_bytes):
return json.loads(_bytes.decode())
class PickleAzureTableBlobBackend(JsonAzureTableBlobBackend):
"""
Backend stores values as pickle files in the structure /tableName/partitionKey/rowKey/valueName.pickle
"""
@property
def fileExtension(self):
return ".pickle"
@staticmethod
def _encodeValueToBytes(value):
return pickle.dumps(value)
@staticmethod
def _decodeBytesToValue(_bytes):
return pickle.loads(_bytes)
class BlobBackedPropertyLoader(PropertyLoader):
AZURE_ALLOWED_SIZE_PER_PROPERTY_BYTES = 64000
AZURE_ALLOWED_STRING_LENGTH_PER_PROPERTY = 32000
"""
PropertyLoader to write and read values from a blob backend via a reference to the value. Useful if values cannot
be stored in table storage itself, due to not being aligned with the table storage data model,
see https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-the-table-service-data-model
"""
def __init__(self, propertyName: str, blobBackend: AzureTableBlobBackend, blobPrefix: str = None, propertyBooleanBlobStatusName: str = None, max_workers=None):
"""
:param propertyName: name of property in table
:param propertyBooleanBlobStatusName: name of the property representing a boolean flag within the table, which indicates whether the value is blob-backed.
If None, each value is assumed to be blob-backed.
:param blobBackend: actual backend to use for storage
:param blobPrefix: prefix to use for blob in storage, e.g. a table name
:param max_workers: maximal number of workers to load data from blob storage
"""
self.blobPrefix = blobPrefix
self.propertyBlobStatusName = propertyBooleanBlobStatusName
self.blobBackend = blobBackend
self.max_workers = max_workers
self.propertyName = propertyName
def loadPropertyValue(self, entity: Entity):
if self._isEntityValueBlobBacked(entity):
entity[self.propertyName] = self.blobBackend.getValueFromReference(entity[self.propertyName])
def writePropertyValue(self, entity: Entity):
if self.propertyName in entity.keys():
if self._needToWriteToBlob(entity[self.propertyName]):
valueIdentifier = self.blobBackend.getValueReference(entity["PartitionKey"], entity["RowKey"], self.propertyName, blobNamePrefix=self.blobPrefix)
value = entity[self.propertyName]
self.blobBackend.setValueForReference(valueIdentifier, value)
entity[self.propertyName] = valueIdentifier
propertyBlobStatus = True if self.propertyBlobStatusName is not None else None
else:
propertyBlobStatus = False if self.propertyBlobStatusName is not None else None
if propertyBlobStatus is not None:
entity[self.propertyBlobStatusName] = propertyBlobStatus
def loadPropertyValueToDataFrameColumn(self, df: pd.DataFrame):
if self.propertyName in df.columns:
if self.propertyBlobStatusName is None:
df.loc[:, self.propertyName] = self._loadValuesInSeries(df[self.propertyName])
else:
df.loc[df[self.propertyBlobStatusName], self.propertyName] = self._loadValuesInSeries(df.loc[df[self.propertyBlobStatusName], self.propertyName])
def _needToWriteToBlob(self, value):
if self.propertyBlobStatusName is None:
return True
if sys.getsizeof(value) > self.AZURE_ALLOWED_SIZE_PER_PROPERTY_BYTES:
return True
if isinstance(value, str) and len(value) > self.AZURE_ALLOWED_STRING_LENGTH_PER_PROPERTY:
return True
return False
def _isEntityValueBlobBacked(self, entity: Entity):
if self.propertyName not in entity.keys():
return False
if self.propertyBlobStatusName is None or self.propertyBlobStatusName not in entity:
return True
return entity[self.propertyBlobStatusName]
def _loadValuesInSeries(self, _series: pd.Series):
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
_series = list(executor.map(self.blobBackend.getValueFromReference, _series))
return _series
class BlobBackedSerialisedPropertyLoader(BlobBackedPropertyLoader, SerialisedPropertyLoader):
"""
Property loader, which combines serialisation and blob backing.
"""
def __init__(self, propertyName, serialiser: Serialiser, blobBackend: AzureTableBlobBackend, blobPrefix: str = None,
propertyBooleanBlobStatusName: str = None, max_workers=None):
"""
:param propertyName: name of property in table
:param serialiser:
:param propertyBooleanBlobStatusName: name of the property representing a boolean flag within the table, which indicates whether the value is blob-backed.
If None, each value is assumed to be blob-backed.
:param blobBackend: actual backend to use for storage
:param blobPrefix: prefix to use for blob in storage, e.g. a table name
:param max_workers: maximal number of workers to load data from blob storage
"""
SerialisedPropertyLoader.__init__(self, propertyName, serialiser)
BlobBackedPropertyLoader.__init__(self, propertyName, blobBackend, blobPrefix, propertyBooleanBlobStatusName, max_workers)
def loadPropertyValue(self, entity: Entity):
# fetch the serialised value from the blob backend first, then de-serialise it
BlobBackedPropertyLoader.loadPropertyValue(self, entity)
SerialisedPropertyLoader.loadPropertyValue(self, entity)
def writePropertyValue(self, entity: Entity):
# serialise the value first, then hand it to the blob backend (which replaces it by a reference)
SerialisedPropertyLoader.writePropertyValue(self, entity)
BlobBackedPropertyLoader.writePropertyValue(self, entity)
def loadPropertyValueToDataFrameColumn(self, df: pd.DataFrame):
# fetch the serialised values from the blob backend first, then de-serialise them
BlobBackedPropertyLoader.loadPropertyValueToDataFrameColumn(self, df)
SerialisedPropertyLoader.loadPropertyValueToDataFrameColumn(self, df)
class AzureLazyBatchCommitTable:
"""
Wrapper for an Azure table, which allows for convenient insertion via lazy batch execution per partition.
Uses a priority queue to manage the order of partitions to be committed.
To execute insertions, call one of the commit methods, e.g. :func:`commitBlockingUntilEmpty`
"""
AZURE_ALLOWED_TABLE_NAME_PATTERN = re.compile("^[A-Za-z][A-Za-z0-9]{2,62}$")
AZURE_ALLOWED_TABLE_BATCH_SIZE = 100
class PartitionCommandsPriorityQueue:
class PartitionCommands:
def __init__(self, partitionKey):
self.partitionKey = partitionKey
self._commandList = collections.deque()
def __len__(self):
return len(self._commandList)
def append(self, command):
self._commandList.append(command)
def execute(self, contextManager: Callable[[], TableBatch], batchSize: int):
while len(self._commandList) > 0:
_slice = [self._commandList.popleft() for _ in range(min(batchSize, len(self._commandList)))]
_log.info(f"Committing {len(_slice)} cache entries to the database")
with contextManager() as batch:
for command in _slice:
command(batch)
def __init__(self):
self.partitionCommandsQueue = []
self.partitionKey2Commands = {}
self._threadLock = threading.Lock()
def addCommand(self, partitionKey, command: Union[Callable[[TableBatch], Any], functools.partial[TableBatch]]):
"""
Add a command to queue of corresponding partitionKey
:param partitionKey:
:param command: a callable on a TableBatch
"""
with self._threadLock:
if partitionKey not in self.partitionKey2Commands:
commands = self.PartitionCommands(partitionKey)
self.partitionCommandsQueue.append(commands)
self.partitionKey2Commands[partitionKey] = commands
self.partitionKey2Commands[partitionKey].append(command)
def pop(self, minLength: int = None) -> Optional[AzureLazyBatchCommitTable.PartitionCommandsPriorityQueue.PartitionCommands]:
"""
:param minLength: minimal length of largest PartitionCommands for the pop to take place.
:return: largest PartitionCommands or None if minimal length is not reached
"""
with self._threadLock:
return self._pop(minLength)
def popAll(self):
with self._threadLock:
commandsList = []
while not self._isEmpty():
commandsList.append(self._pop())
return commandsList
def isEmpty(self):
with self._threadLock:
return self._isEmpty()
def _pop(self, minLength=None):
length, index = self._getMaxPriorityInfo()
if index is not None and (minLength is None or length >= minLength):
q = self.partitionCommandsQueue.pop(index)
del self.partitionKey2Commands[q.partitionKey]
return q
else:
return None
def _isEmpty(self):
return len(self.partitionCommandsQueue) == 0
def _getMaxPriorityInfo(self):
lengthsList = list(map(len, self.partitionCommandsQueue))
if len(lengthsList) == 0:
return 0, None
maxLength = max(lengthsList)
return maxLength, lengthsList.index(maxLength)
def __init__(self, tableName: str, tableService: TableService, propertyLoaders: Sequence[PropertyLoader] = ()):
"""
:param tableName: name of table
:param tableService: instance of :class:`azure.storage.table.TableService` to connect to Azure table storage
:param propertyLoaders:
"""
if not self.AZURE_ALLOWED_TABLE_NAME_PATTERN.match(tableName):
raise ValueError(f"Invalid table name {tableName}, see: https://docs.microsoft.com/en-us/rest/api/storageservices/Understanding-the-Table-Service-Data-Model")
self.tableService = tableService
self.tableName = tableName
self.propertyLoaders = propertyLoaders
self._partitionQueues = self.PartitionCommandsPriorityQueue()
self._contextManager = functools.partial(self.tableService.batch, self.tableName)
if not self.exists():
self.tableService.create_table(self.tableName)
def insertOrReplaceEntity(self, entity: Union[Dict, Entity]):
"""
Lazy wrapper method for :func:`azure.storage.table.TableService.insert_or_replace_entity`
:param entity:
"""
partitionKey = entity["PartitionKey"]
for propertyLoader in self.propertyLoaders:
propertyLoader.writePropertyValue(entity)
executionCommand = functools.partial(self._insertOrReplaceEntityViaBatch, entity)
self._partitionQueues.addCommand(partitionKey, executionCommand)
def insertEntity(self, entity: Union[Dict, Entity]):
"""
Lazy wrapper method for :func:`azure.storage.table.TableService.insert_entity`
:param entity:
"""
partitionKey = entity["PartitionKey"]
for propertyLoader in self.propertyLoaders:
propertyLoader.writePropertyValue(entity)
executionCommand = functools.partial(self._insertEntityViaBatch, entity)
self._partitionQueues.addCommand(partitionKey, executionCommand)
def getEntity(self, partitionKey: str, rowKey: str) -> Optional[Entity]:
"""
Wraps :func:`azure.storage.table.TableService.get_entity`
:param partitionKey:
:param rowKey:
:return:
"""
try:
entity = self.tableService.get_entity(self.tableName, partitionKey, rowKey)
for propertyLoader in self.propertyLoaders:
propertyLoader.loadPropertyValue(entity)
return entity
except Exception as e:
_log.debug(f"Unable to load value for partitionKey {partitionKey} and rowKey {rowKey} from table {self.tableName}: {e}")
return None
def commitBlockingUntilEmpty(self, maxBatchSize=AZURE_ALLOWED_TABLE_BATCH_SIZE):
"""
Commit insertion commands. Commands are executed batch-wise per partition until partition queue is empty in a
blocking manner.
:param maxBatchSize: maximal batch size to use for batch insertion, must be less than or equal to the batch size allowed by Azure
"""
maxBatchSize = self._validateMaxBatchSize(maxBatchSize)
while not self._partitionQueues.isEmpty():
commands = self._partitionQueues.pop()
commands.execute(self._contextManager, maxBatchSize)
def commitNonBlockingCurrentQueueState(self, maxBatchSize=AZURE_ALLOWED_TABLE_BATCH_SIZE):
"""
Commit insertion commands. Empties the current PartitionCommandsQueue in a non-blocking way.
Commands are executed batch-wise per partition.
:param maxBatchSize: maximal batch size to use for batch insertion, must be less than or equal to the batch size allowed by Azure
"""
maxBatchSize = self._validateMaxBatchSize(maxBatchSize)
def commit():
commandsList = self._partitionQueues.popAll()
for commands in commandsList:
commands.execute(self._contextManager, maxBatchSize)
thread = threading.Thread(target=commit, daemon=False)
thread.start()
def commitBlockingLargestPartitionFromQueue(self, maxBatchSize=AZURE_ALLOWED_TABLE_BATCH_SIZE, minLength=None):
"""
Commits the largest partition from the PartitionCommandsQueue in a blocking way
:param maxBatchSize: maximal batch size to use for batch insertion, must be less than or equal to the batch size allowed by Azure
:param minLength: minimal size of largest partition. If not None, pop and commit only if minLength is reached.
:return:
"""
maxBatchSize = self._validateMaxBatchSize(maxBatchSize)
commands = self._partitionQueues.pop(minLength)
if commands is not None:
commands.execute(self._contextManager, maxBatchSize)
def _validateMaxBatchSize(self, maxBatchSize):
if maxBatchSize > self.AZURE_ALLOWED_TABLE_BATCH_SIZE:
_log.warning(f"Provided maxBatchSize is larger than allowed size {self.AZURE_ALLOWED_TABLE_BATCH_SIZE}. Will use maxBatchSize {self.AZURE_ALLOWED_TABLE_BATCH_SIZE} instead.")
maxBatchSize = self.AZURE_ALLOWED_TABLE_BATCH_SIZE
return maxBatchSize
def loadTableToDataFrame(self, columns: List[str] = None, rowFilterQuery: str = None, numRecords: int = None):
"""
Load all rows of table to :class:`~pandas.DataFrame`
:param rowFilterQuery:
:param numRecords:
:param columns: restrict loading to provided columns
:return: :class:`~pandas.DataFrame`
"""
if numRecords is None:
records = list(self._iterRecords(columns, rowFilterQuery))
else:
records = []
for record in self._iterRecords(columns, rowFilterQuery):
records.append(record)
if len(records) >= numRecords:
break
df = pd.DataFrame(records, columns=columns)
for propertyLoader in self.propertyLoaders:
propertyLoader.loadPropertyValueToDataFrameColumn(df)
return df
def iterDataFrameChunks(self, chunkSize: int, columns: List[str] = None, rowFilterQuery: str = None):
"""
Get a generator of dataframe chunks
:param rowFilterQuery:
:param chunkSize:
:param columns:
:return:
"""
records = []
for record in self._iterRecords(columns, rowFilterQuery):
records.append(record)
if len(records) >= chunkSize:
df = pd.DataFrame(records, columns=columns)
for propertyLoader in self.propertyLoaders:
propertyLoader.loadPropertyValueToDataFrameColumn(df)
yield df
records = []
def iterRecords(self, columns: List[str] = None, rowFilterQuery: str = None):
"""
Get a generator of table entities
:param rowFilterQuery:
:param columns:
:return:
"""
for entity in self._iterRecords(columns, rowFilterQuery):
for propertyLoader in self.propertyLoaders:
propertyLoader.loadPropertyValue(entity)
yield entity
def _iterRecords(self, columns: Optional[List[str]], rowFilterQuery: Optional[str]):
columnNamesAsCommaSeparatedString = None
if columns is not None:
columnNamesAsCommaSeparatedString = ",".join(columns)
return self.tableService.query_entities(self.tableName, select=columnNamesAsCommaSeparatedString,
filter=rowFilterQuery)
def insertDataFrameToTable(self, df: pd.DataFrame, partitionKeyGenerator: Callable[[str], str] = None, numRecords: int = None):
"""
Inserts or replaces entities of the table corresponding to rows of the DataFrame, where the index of the DataFrame acts as rowKey.
Values of object-type columns in the DataFrame may have to be serialised via json beforehand.
:param df: DataFrame to be inserted
:param partitionKeyGenerator: if None, partitionKeys default to tableName
:param numRecords: restrict insertion to first numRecords rows, merely for testing
"""
for (count, (idx, row)) in enumerate(df.iterrows()):
if numRecords is not None:
if count >= numRecords:
break
entity = row.to_dict()
entity["RowKey"] = idx
entity["PartitionKey"] = self.tableName if partitionKeyGenerator is None else partitionKeyGenerator(idx)
self.insertOrReplaceEntity(entity)
@staticmethod
def _insertOrReplaceEntityViaBatch(entity, batch: TableBatch):
return batch.insert_or_replace_entity(entity)
@staticmethod
def _insertEntityViaBatch(entity, batch: TableBatch):
return batch.insert_entity(entity)
def exists(self):
return self.tableService.exists(self.tableName)
class AzureTablePersistentKeyValueCache(PersistentKeyValueCache):
"""
PersistentKeyValueCache using Azure Table Storage, see https://docs.microsoft.com/en-gb/azure/storage/tables/
"""
CACHE_VALUE_IDENTIFIER = "cache_value"
def __init__(self, tableService: TableService, tableName="cache", partitionKeyGenerator: Callable[[str], str] = None,
maxBatchSize=100, minSizeForPeriodicCommit: Optional[int] = 100, deferredCommitDelaySecs=1.0, inMemory=False,
blobBackend: AzureTableBlobBackend = None, serialiser: Serialiser = None, max_workers: int = None):
"""
:param tableService: https://docs.microsoft.com/en-us/python/api/azure-cosmosdb-table/azure.cosmosdb.table.tableservice.tableservice?view=azure-python
:param tableName: name of table, needs to match restrictions for Azure storage resources, see https://docs.microsoft.com/en-gb/azure/azure-resource-manager/management/resource-name-rules
:param partitionKeyGenerator: callable to generate a partitionKey from provided string, if None partitionKey in requests defaults to tableName
:param maxBatchSize: maximal batch size for each commit.
:param deferredCommitDelaySecs: the time frame during which no new data must be added for a pending transaction to be committed
:param minSizeForPeriodicCommit: minimal size of a batch to be committed in a periodic thread.
If None, commits are only executed in a deferred manner, i.e. commit only if there is no update for deferredCommitDelaySecs
:param inMemory: boolean flag indicating whether the table should be loaded into memory at construction
:param blobBackend: if not None, blob storage will be used to store the actual value, and cache_value in the table only contains a reference
:param max_workers: maximal number of workers to load data from blob backend
"""
self._deferredCommitDelaySecs = deferredCommitDelaySecs
self._partitionKeyGenerator = partitionKeyGenerator
def createPropertyLoaders():
if blobBackend is None and serialiser is None:
_propertyLoaders = ()
elif blobBackend is None and serialiser is not None:
_propertyLoaders = (SerialisedPropertyLoader(self.CACHE_VALUE_IDENTIFIER, serialiser),)
elif blobBackend is not None and serialiser is None:
propertyBlobStatusName = self.CACHE_VALUE_IDENTIFIER + "_blob_backed"
_propertyLoaders = (BlobBackedPropertyLoader(self.CACHE_VALUE_IDENTIFIER, blobBackend, tableName,
propertyBlobStatusName, max_workers),)
else:
propertyBlobStatusName = self.CACHE_VALUE_IDENTIFIER + "_blob_backed"
_propertyLoaders = (BlobBackedSerialisedPropertyLoader(self.CACHE_VALUE_IDENTIFIER, serialiser, blobBackend,
tableName, propertyBlobStatusName, max_workers),)
return _propertyLoaders
propertyLoaders = createPropertyLoaders()
self._batchCommitTable = AzureLazyBatchCommitTable(tableName, tableService, propertyLoaders=propertyLoaders)
self._minSizeForPeriodicCommit = minSizeForPeriodicCommit
self._maxBatchSize = maxBatchSize
self._updateHook = PeriodicUpdateHook(deferredCommitDelaySecs, noUpdateFn=self._commit, periodicFn=self._periodicallyCommit)
self._inMemoryCache = None
if inMemory:
df = self._batchCommitTable.loadTableToDataFrame(columns=['RowKey', self.CACHE_VALUE_IDENTIFIER]).set_index("RowKey")
_log.info(f"Loaded {len(df)} entries of table {tableName} in memory")
self._inMemoryCache = df[self.CACHE_VALUE_IDENTIFIER].to_dict()
def set(self, key, value):
keyAsString = str(key)
partitionKey = self._getPartitionKeyForRowKey(keyAsString)
entity = {'PartitionKey': partitionKey, 'RowKey': keyAsString, self.CACHE_VALUE_IDENTIFIER: value}
self._batchCommitTable.insertOrReplaceEntity(entity)
self._updateHook.handleUpdate()
if self._inMemoryCache is not None:
self._inMemoryCache[keyAsString] = value
def get(self, key):
keyAsString = str(key)
value = self._getFromInMemoryCache(keyAsString)
if value is None:
value = self._getFromTable(keyAsString)
return value
def _getFromTable(self, key: str):
partitionKey = self._getPartitionKeyForRowKey(key)
entity = self._batchCommitTable.getEntity(partitionKey, key)
if entity is not None:
return entity[self.CACHE_VALUE_IDENTIFIER]
return None
def _getFromInMemoryCache(self, key):
if self._inMemoryCache is None:
return None
return self._inMemoryCache.get(str(key), None)
def _getPartitionKeyForRowKey(self, key: str):
return self._batchCommitTable.tableName if self._partitionKeyGenerator is None else self._partitionKeyGenerator(key)
def _commit(self):
self._batchCommitTable.commitNonBlockingCurrentQueueState(self._maxBatchSize)
def _periodicallyCommit(self):
self._batchCommitTable.commitBlockingLargestPartitionFromQueue(self._maxBatchSize, self._minSizeForPeriodicCommit)
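# Usage sketch (hypothetical credentials; requires an Azure storage account):
#
#   from azure.storage.table import TableService
#   tableService = TableService(account_name="myaccount", account_key="...")
#   cache = AzureTablePersistentKeyValueCache(tableService, tableName="cache")
#   cache.set("some_key", 42)
#   value = cache.get("some_key")   # available from the table once the deferred batch commit has run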
|
exp_monitor.py
|
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
parser = ArgumentParser(description="CWND/Queue Monitor")
parser.add_argument('--exp', '-e',
dest="exp",
action="store",
help="Name of the Experiment",
required=True)
# Expt parameters
args = parser.parse_args()
def start_tcpprobe():
"Install tcp_pobe module and dump to file"
os.system("(rmmod tcp_probe >/dev/null 2>&1); modprobe tcp_probe full=1;")
print "Monitoring TCP CWND ... will save it to ./%s_tcpprobe.txt " % args.exp
Popen("cat /proc/net/tcpprobe > ./%s_tcpprobe.txt" %
args.exp, shell=True)
def qmon():
monitor = Process(target=monitor_qlen,args=('s0-eth2', 0.01, '%s_sw0-qlen.txt' % args.exp ))
monitor.start()
print "Monitoring Queue Occupancy ... will save it to %s_sw0-qlen.txt " % args.exp
raw_input('Press Enter key to stop the monitor--> ')
monitor.terminate()
if __name__ == '__main__':
start_tcpprobe()
qmon()
Popen("killall -9 cat", shell=True).wait()
|
rpc_api.py
|
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
import threading
class RPCApi(object):
def __init__(self, config):
self.config = config
# instance attribute, so registered function names are not shared across instances
self.functions = []
self.server = SimpleJSONRPCServer((self.config['rpc_host'], self.config['rpc_port']))
self.server.timeout = self.config['rpc_timeout'] if "rpc_timeout" in config else 1
self.register_function(self.list_functions, "list_functions")
def register_functions(self, **kwargs):
"""Registers functions with the server."""
for function_name in kwargs:
function = kwargs[function_name]
self.register_function(function, function_name)
def register_function(self, function, function_name):
"""Registers a single function with the server."""
self.server.register_function(function, function_name)
self.functions.append(function_name)
def list_functions(self):
"""An externally accessible function returning all the registered function names"""
return list(set(self.functions))
def poll(self):
"""Serves one request from the waiting requests and returns"""
self.server.handle_request()
def run(self):
"""Blocks execution and runs the server till the program shutdown"""
self.server.serve_forever()
def start_thread(self):
"""Starts self.run() in a separate thread"""
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
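# Usage sketch (hypothetical config values):
#
#   api = RPCApi({"rpc_host": "127.0.0.1", "rpc_port": 8075})
#   api.register_function(lambda a, b: a + b, "add")
#   api.start_thread()   # serve in the background; api.run() would block instead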
|
PlexAPI.py
|
#!/usr/bin/env python
"""
Collection of "connector functions" to Plex Media Server/MyPlex
PlexGDM:
loosely based on hippojay's plexGDM:
https://github.com/hippojay/script.plexbmc.helper... /resources/lib/plexgdm.py
Plex Media Server communication:
source (somewhat): https://github.com/hippojay/plugin.video.plexbmc
later converted from httplib to urllib2
Transcoder support:
PlexAPI_getTranscodePath() based on getTranscodeURL from pyplex/plexAPI
https://github.com/megawubs/pyplex/blob/master/plexAPI/info.py
MyPlex - Basic Authentication:
http://www.voidspace.org.uk/python/articles/urllib2.shtml
http://www.voidspace.org.uk/python/articles/authentication.shtml
http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
(and others...)
"""
import sys
import struct
import time
import urllib2, socket, StringIO, gzip
from threading import Thread
import Queue
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from urllib import urlencode, quote_plus
from Version import __VERSION__
from Debug import * # dprint(), prettyXML()
"""
storage for PMS addresses and additional information - now per aTV! (replaces global PMS_list)
syntax: PMS[<ATV_UDID>][PMS_UUID][<data>]
data: name, ip, ...type (local, myplex)
"""
g_PMS = {}
"""
Plex Media Server handling
parameters:
ATV_udid
uuid - PMS ID
name, scheme, ip, port, type, owned, token
"""
def declarePMS(ATV_udid, uuid, name, scheme, ip, port):
# store PMS information in g_PMS database
global g_PMS
if not ATV_udid in g_PMS:
g_PMS[ATV_udid] = {}
address = ip + ':' + port
baseURL = scheme+'://'+ip+':'+port
g_PMS[ATV_udid][uuid] = { 'name': name,
'scheme':scheme, 'ip': ip , 'port': port,
'address': address,
'baseURL': baseURL,
'local': '1',
'owned': '1',
'accesstoken': '',
'enableGzip': False
}
def updatePMSProperty(ATV_udid, uuid, tag, value):
# set property element of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
g_PMS[ATV_udid][uuid][tag] = value
def getPMSProperty(ATV_udid, uuid, tag):
# get name of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
return g_PMS[ATV_udid][uuid].get(tag, '')
def getPMSFromAddress(ATV_udid, address):
# find PMS by IP, return UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
for uuid in g_PMS[ATV_udid]:
if address in g_PMS[ATV_udid][uuid].get('address', ''):
return uuid
return '' # IP not found
def getPMSAddress(ATV_udid, uuid):
# get address of PMS by UUID
if not ATV_udid in g_PMS:
return '' # no server known for this aTV
if not uuid in g_PMS[ATV_udid]:
return '' # requested PMS not available
return g_PMS[ATV_udid][uuid]['ip'] + ':' + g_PMS[ATV_udid][uuid]['port']
def getPMSCount(ATV_udid):
# get count of discovered PMS by UUID
if not ATV_udid in g_PMS:
return 0 # no server known for this aTV
return len(g_PMS[ATV_udid])
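"""
example (illustrative values): declare a PMS and query it back
    declarePMS('aTV-udid', 'uuid-1234', 'Living Room', 'http', '192.168.0.10', '32400')
    getPMSProperty('aTV-udid', 'uuid-1234', 'name')   # -> 'Living Room'
    getPMSAddress('aTV-udid', 'uuid-1234')            # -> '192.168.0.10:32400'
    getPMSCount('aTV-udid')                           # -> 1
"""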
"""
PlexGDM
parameters:
none
result:
PMS_list - dict() of PMSs found
"""
IP_PlexGDM = '239.0.0.250' # multicast to PMS
Port_PlexGDM = 32414
Msg_PlexGDM = 'M-SEARCH * HTTP/1.0'
def PlexGDM():
dprint(__name__, 0, "***")
dprint(__name__, 0, "looking up Plex Media Server")
dprint(__name__, 0, "***")
# setup socket for discovery -> multicast message
GDM = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
GDM.settimeout(1.0)
# Set the time-to-live for messages to 1 for local network
ttl = struct.pack('b', 1)
GDM.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
returnData = []
try:
# Send data to the multicast group
dprint(__name__, 1, "Sending discovery message: {0}", Msg_PlexGDM)
GDM.sendto(Msg_PlexGDM, (IP_PlexGDM, Port_PlexGDM))
# Look for responses from all recipients
while True:
try:
data, server = GDM.recvfrom(1024)
dprint(__name__, 1, "Received data from {0}", server)
dprint(__name__, 1, "Data received:\n {0}", data)
returnData.append( { 'from' : server,
'data' : data } )
except socket.timeout:
break
finally:
GDM.close()
discovery_complete = True
PMS_list = {}
if returnData:
for response in returnData:
update = { 'ip' : response.get('from')[0] }
# Check if we had a positive HTTP response
if "200 OK" in response.get('data'):
for each in response.get('data').split('\n'):
# decode response data
update['discovery'] = "auto"
#update['owned']='1'
#update['master']= 1
#update['role']='master'
if "Content-Type:" in each:
update['content-type'] = each.split(':')[1].strip()
elif "Resource-Identifier:" in each:
update['uuid'] = each.split(':')[1].strip()
elif "Name:" in each:
update['serverName'] = each.split(':')[1].strip().decode('utf-8', 'replace') # store in utf-8
elif "Port:" in each:
update['port'] = each.split(':')[1].strip()
elif "Updated-At:" in each:
update['updated'] = each.split(':')[1].strip()
elif "Version:" in each:
update['version'] = each.split(':')[1].strip()
PMS_list[update['uuid']] = update
if PMS_list=={}:
dprint(__name__, 0, "GDM: No servers discovered")
else:
dprint(__name__, 0, "GDM: Servers discovered: {0}", len(PMS_list))
for uuid in PMS_list:
dprint(__name__, 1, "{0} {1}:{2}", PMS_list[uuid]['serverName'], PMS_list[uuid]['ip'], PMS_list[uuid]['port'])
return PMS_list
"""
discoverPMS
parameters:
ATV_udid
CSettings - for manual PMS configuration. this one looks strange.
IP_self
optional:
tokenDict - dictionary of tokens for MyPlex, PlexHome
result:
g_PMS database for ATV_udid
"""
def discoverPMS(ATV_udid, CSettings, IP_self, tokenDict={}):
global g_PMS
g_PMS[ATV_udid] = {}
# install plex.tv "virtual" PMS - for myPlex, PlexHome
declarePMS(ATV_udid, 'plex.tv', 'plex.tv', 'https', 'plex.tv', '443')
updatePMSProperty(ATV_udid, 'plex.tv', 'local', '-')
updatePMSProperty(ATV_udid, 'plex.tv', 'owned', '-')
updatePMSProperty(ATV_udid, 'plex.tv', 'accesstoken', tokenDict.get('MyPlex', ''))
#debug
#declarePMS(ATV_udid, '2ndServer', '2ndServer', 'http', '192.168.178.22', '32400', 'local', '1', 'token')
#declarePMS(ATV_udid, 'remoteServer', 'remoteServer', 'http', '127.0.0.1', '1234', 'myplex', '1', 'token')
#debug
# local PMS
if CSettings.getSetting('enable_plexgdm')=='False':
# defined in setting.cfg
ip = CSettings.getSetting('ip_pms')
port = CSettings.getSetting('port_pms')
XML = getXMLFromPMS('http://'+ip+':'+port, '/servers', None, '')
if XML==False:
pass # no response from manual defined server (Settings.cfg)
else:
Server = XML.find('Server')
uuid = Server.get('machineIdentifier')
name = Server.get('name')
declarePMS(ATV_udid, uuid, name, 'http', ip, port) # dflt: token='', local, owned
# todo - check IP to verify "local"?
else:
# PlexGDM
PMS_list = PlexGDM()
for uuid in PMS_list:
PMS = PMS_list[uuid]
declarePMS(ATV_udid, PMS['uuid'], PMS['serverName'], 'http', PMS['ip'], PMS['port']) # dflt: token='', local, owned
# MyPlex servers
if 'PlexHome' in tokenDict:
authtoken = tokenDict.get('PlexHome')
else:
authtoken = tokenDict.get('MyPlex', '')
if not authtoken=='':
XML = getXMLFromPMS('https://plex.tv', '/pms/servers', None, authtoken)
if XML==False:
pass # no data from MyPlex
else:
queue = Queue.Queue()
threads = []
for Dir in XML.getiterator('Server'):
uuid = Dir.get('machineIdentifier')
name = Dir.get('name')
scheme = Dir.get('scheme')
ip = Dir.get('address')
port = Dir.get('port')
token = Dir.get('accessToken', '')
owned = Dir.get('owned', '0')
if uuid in g_PMS.get(ATV_udid, {}):
# server known: local, manually defined or PlexGDM
updatePMSProperty(ATV_udid, uuid, 'accesstoken', token)
updatePMSProperty(ATV_udid, uuid, 'owned', owned)
else:
# remote servers
# check MyPlex data age - skip if >2 days
infoAge = time.time() - int(Dir.get('updatedAt'))
oneDayInSec = 60*60*24
if infoAge > 2*oneDayInSec: # two days in seconds -> expiration in setting?
dprint(__name__, 1, "Server {0} not updated for {1} days - skipping.", name, infoAge/oneDayInSec)
continue
# poke PMS, own thread for each poke
PMS = { 'baseURL': scheme+'://'+ip+':'+port, 'path': '/', 'options': None, 'token': token, \
'data': Dir }
t = Thread(target=getXMLFromPMSToQueue, args=(PMS, queue))
t.start()
threads.append(t)
# wait for requests being answered
for t in threads:
t.join()
# declare new PMSs
while not queue.empty():
(Dir, PMS) = queue.get()
if PMS==False:
continue
uuid = Dir.get('machineIdentifier')
name = Dir.get('name')
scheme = Dir.get('scheme')
ip = Dir.get('address')
port = Dir.get('port')
token = Dir.get('accessToken', '')
owned = Dir.get('owned', '0')
declarePMS(ATV_udid, uuid, name, scheme, ip, port) # dflt: token='', local, owned - updated later
updatePMSProperty(ATV_udid, uuid, 'local', '0') # todo - check IP?
updatePMSProperty(ATV_udid, uuid, 'accesstoken', token)
updatePMSProperty(ATV_udid, uuid, 'owned', owned)
# all servers - update enableGzip
for uuid in g_PMS.get(ATV_udid, {}):
# enable Gzip if not on same host, local&remote PMS depending on setting
enableGzip = (not getPMSProperty(ATV_udid, uuid, 'ip')==IP_self) and ( \
(getPMSProperty(ATV_udid, uuid, 'local')=='1' and CSettings.getSetting('allow_gzip_pmslocal')=='True' ) or \
(getPMSProperty(ATV_udid, uuid, 'local')=='0' and CSettings.getSetting('allow_gzip_pmsremote')=='True') )
updatePMSProperty(ATV_udid, uuid, 'enableGzip', enableGzip)
# debug print all servers
dprint(__name__, 0, "Servers (local, plex.tv, MyPlex): {0}", len(g_PMS[ATV_udid]))
for uuid in g_PMS[ATV_udid]:
dprint(__name__, 1, str(g_PMS[ATV_udid][uuid]))
"""
Plex Media Server communication
parameters:
host
path
options - dict() of PlexConnect-options as received from aTV, None for no std. X-Plex-Args
authtoken - authentication answer from MyPlex Sign In
result:
returned XML or 'False' in case of error
"""
def getXMLFromPMS(baseURL, path, options={}, authtoken='', enableGzip=False):
xargs = {}
if not options==None:
xargs = getXArgsDeviceInfo(options)
if not authtoken=='':
xargs['X-Plex-Token'] = authtoken
dprint(__name__, 1, "URL: {0}{1}", baseURL, path)
dprint(__name__, 1, "xargs: {0}", xargs)
request = urllib2.Request(baseURL+path , None, xargs)
request.add_header('User-agent', 'PlexConnect')
if enableGzip:
request.add_header('Accept-encoding', 'gzip')
try:
response = urllib2.urlopen(request, timeout=20)
except urllib2.URLError as e:
dprint(__name__, 0, 'No Response from Plex Media Server')
if hasattr(e, 'reason'):
dprint(__name__, 0, "We failed to reach a server. Reason: {0}", e.reason)
elif hasattr(e, 'code'):
dprint(__name__, 0, "The server couldn't fulfill the request. Error code: {0}", e.code)
return False
except IOError:
dprint(__name__, 0, 'Error loading response XML from Plex Media Server')
return False
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO.StringIO(response.read())
file = gzip.GzipFile(fileobj=buf)
XML = etree.parse(file)
else:
# parse into etree
XML = etree.parse(response)
dprint(__name__, 1, "====== received PMS-XML ======")
dprint(__name__, 1, XML.getroot())
dprint(__name__, 1, "====== PMS-XML finished ======")
#XMLTree = etree.ElementTree(etree.fromstring(response))
return XML
def getXMLFromPMSToQueue(PMS, queue):
XML = getXMLFromPMS(PMS['baseURL'],PMS['path'],PMS['options'],PMS['token'])
queue.put( (PMS['data'], XML) )
def getXArgsDeviceInfo(options={}):
xargs = dict()
xargs['X-Plex-Device'] = 'AppleTV'
xargs['X-Plex-Model'] = '3,1' # Base it on AppleTV model.
#if not options is None:
if 'PlexConnectUDID' in options:
xargs['X-Plex-Client-Identifier'] = options['PlexConnectUDID'] # UDID for MyPlex device identification
if 'PlexConnectATVName' in options:
xargs['X-Plex-Device-Name'] = options['PlexConnectATVName'] # "friendly" name: aTV-Settings->General->Name.
xargs['X-Plex-Platform'] = 'iOS'
xargs['X-Plex-Client-Platform'] = 'iOS'
if 'aTVFirmwareVersion' in options:
xargs['X-Plex-Platform-Version'] = options['aTVFirmwareVersion']
xargs['X-Plex-Product'] = 'PlexConnect'
xargs['X-Plex-Version'] = __VERSION__
return xargs
"""
provide combined XML representation of local servers' XMLs, e.g. /library/sections
parameters:
ATV_udid
path
type - owned <> shared (previously: local, myplex)
options
result:
XML
"""
def getXMLFromMultiplePMS(ATV_udid, path, type, options={}):
queue = Queue.Queue()
threads = []
root = etree.Element("MediaConverter")
root.set('friendlyName', type+' Servers')
for uuid in g_PMS.get(ATV_udid, {}):
if (type=='all' and getPMSProperty(ATV_udid, uuid, 'name')!='plex.tv') or \
(type=='owned' and getPMSProperty(ATV_udid, uuid, 'owned')=='1') or \
(type=='shared' and getPMSProperty(ATV_udid, uuid, 'owned')=='0') or \
(type=='local' and getPMSProperty(ATV_udid, uuid, 'local')=='1') or \
(type=='remote' and getPMSProperty(ATV_udid, uuid, 'local')=='0'):
Server = etree.SubElement(root, 'Server') # create "Server" node
Server.set('name', getPMSProperty(ATV_udid, uuid, 'name'))
Server.set('address', getPMSProperty(ATV_udid, uuid, 'ip'))
Server.set('port', getPMSProperty(ATV_udid, uuid, 'port'))
Server.set('baseURL', getPMSProperty(ATV_udid, uuid, 'baseURL'))
Server.set('local', getPMSProperty(ATV_udid, uuid, 'local'))
Server.set('owned', getPMSProperty(ATV_udid, uuid, 'owned'))
baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
Server.set('searchKey', PMS_mark + getURL('', '', '/Search/Entry.xml'))
# request XMLs, one thread for each
PMS = { 'baseURL':baseURL, 'path':path, 'options':options, 'token':token, \
'data': {'uuid': uuid, 'Server': Server} }
t = Thread(target=getXMLFromPMSToQueue, args=(PMS, queue))
t.start()
threads.append(t)
# wait for requests being answered
for t in threads:
t.join()
# add new data to root XML, individual Server
while not queue.empty():
(data, XML) = queue.get()
uuid = data['uuid']
Server = data['Server']
baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
if XML==False:
Server.set('size', '0')
else:
Server.set('size', XML.getroot().get('size', '0'))
for Dir in XML.getiterator('Directory'): # copy "Directory" content, add PMS to links
key = Dir.get('key') # absolute path
Dir.set('key', PMS_mark + getURL('', path, key))
Dir.set('refreshKey', getURL(baseURL, path, key) + '/refresh')
if 'thumb' in Dir.attrib:
Dir.set('thumb', PMS_mark + getURL('', path, Dir.get('thumb')))
if 'art' in Dir.attrib:
Dir.set('art', PMS_mark + getURL('', path, Dir.get('art')))
Server.append(Dir)
for Playlist in XML.getiterator('Playlist'): # copy "Playlist" content, add PMS to links
key = Playlist.get('key') # absolute path
Playlist.set('key', PMS_mark + getURL('', path, key))
if 'composite' in Playlist.attrib:
Playlist.set('composite', PMS_mark + getURL('', path, Playlist.get('composite')))
Server.append(Playlist)
root.set('size', str(len(root.findall('Server'))))
XML = etree.ElementTree(root)
dprint(__name__, 1, "====== Local Server/Sections XML ======")
dprint(__name__, 1, XML.getroot())
dprint(__name__, 1, "====== Local Server/Sections XML finished ======")
return XML # XML representation - created "just in time". Do we need to cache it?
def getURL(baseURL, path, key):
if key.startswith('http://') or key.startswith('https://'): # external server
URL = key
elif key.startswith('/'): # internal full path.
URL = baseURL + key
elif key == '': # internal path
URL = baseURL + path
else: # internal path, add-on
URL = baseURL + path + '/' + key
return URL
"""
MyPlex Sign In, Sign Out
parameters:
username - Plex forum name, MyPlex login, or email address
password
options - dict() of PlexConnect-options as received from aTV - necessary: PlexConnectUDID
result:
username
authtoken - token for subsequent communication with MyPlex
"""
def MyPlexSignIn(username, password, options):
# MyPlex web address
MyPlexHost = 'plex.tv'
MyPlexSignInPath = '/users/sign_in.xml'
MyPlexURL = 'https://' + MyPlexHost + MyPlexSignInPath
# create POST request
xargs = getXArgsDeviceInfo(options)
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
    # no certificate, will fail with "401 - Authentication required"
"""
try:
f = urllib2.urlopen(request)
except urllib2.HTTPError, e:
print e.headers
print "has WWW_Authenticate:", e.headers.has_key('WWW-Authenticate')
print
"""
# provide credentials
### optional... when 'realm' is unknown
##passmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()
##passmanager.add_password(None, address, username, password) # None: default "realm"
passmanager = urllib2.HTTPPasswordMgr()
passmanager.add_password(MyPlexHost, MyPlexURL, username, password) # realm = 'plex.tv'
authhandler = urllib2.HTTPBasicAuthHandler(passmanager)
urlopener = urllib2.build_opener(authhandler)
# sign in, get MyPlex response
try:
response = urlopener.open(request).read()
    except urllib2.HTTPError as e:
if e.code==401:
dprint(__name__, 0, 'Authentication failed')
return ('', '')
else:
raise
dprint(__name__, 1, "====== MyPlex sign in XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlex sign in XML finished ======")
# analyse response
XMLTree = etree.ElementTree(etree.fromstring(response))
el_username = XMLTree.find('username')
el_authtoken = XMLTree.find('authentication-token')
if el_username is None or \
el_authtoken is None:
username = ''
authtoken = ''
dprint(__name__, 0, 'MyPlex Sign In failed')
else:
username = el_username.text
authtoken = el_authtoken.text
        dprint(__name__, 0, 'MyPlex Sign In successful')
return (username, authtoken)
def MyPlexSignOut(authtoken):
# MyPlex web address
MyPlexHost = 'plex.tv'
MyPlexSignOutPath = '/users/sign_out.xml'
    MyPlexURL = 'https://' + MyPlexHost + MyPlexSignOutPath  # use https so the token is not sent in clear text
# create POST request
xargs = { 'X-Plex-Token': authtoken }
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
response = urllib2.urlopen(request).read()
dprint(__name__, 1, "====== MyPlex sign out XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlex sign out XML finished ======")
dprint(__name__, 0, 'MyPlex Sign Out done')
def MyPlexSwitchHomeUser(id, pin, options, authtoken):
MyPlexHost = 'https://plex.tv'
MyPlexURL = MyPlexHost + '/api/home/users/' + id + '/switch'
if pin:
MyPlexURL += '?pin=' + pin
xargs = {}
if options:
xargs = getXArgsDeviceInfo(options)
xargs['X-Plex-Token'] = authtoken
request = urllib2.Request(MyPlexURL, None, xargs)
request.get_method = lambda: 'POST' # turn into 'POST' - done automatically with data!=None. But we don't have data.
response = urllib2.urlopen(request).read()
dprint(__name__, 1, "====== MyPlexHomeUser XML ======")
dprint(__name__, 1, response)
dprint(__name__, 1, "====== MyPlexHomeUser XML finished ======")
# analyse response
XMLTree = etree.ElementTree(etree.fromstring(response))
el_user = XMLTree.getroot() # root=<user>. double check?
username = el_user.attrib.get('title', '')
authtoken = el_user.attrib.get('authenticationToken', '')
    if username and authtoken:
        dprint(__name__, 0, 'MyPlex HomeUser switch successful')
    else:
        dprint(__name__, 0, 'MyPlex HomeUser switch failed')
return (username, authtoken)
"""
Transcode Video support
parameters:
path
AuthToken
options - dict() of PlexConnect-options as received from aTV
action - transcoder action: Auto, Directplay, Transcode
quality - (resolution, quality, bitrate)
subtitle - {'selected', 'dontBurnIn', 'size'}
audio - {'boost'}
result:
final path to pull in PMS transcoder
"""
def getTranscodeVideoPath(path, AuthToken, options, action, quality, subtitle, audio, partIndex):
UDID = options['PlexConnectUDID']
transcodePath = '/video/:/transcode/universal/start.m3u8?'
vRes = quality[0]
vQ = quality[1]
mVB = quality[2]
dprint(__name__, 1, "Setting transcode quality Res:{0} Q:{1} {2}Mbps", vRes, vQ, mVB)
dprint(__name__, 1, "Subtitle: selected {0}, dontBurnIn {1}, size {2}", subtitle['selected'], subtitle['dontBurnIn'], subtitle['size'])
dprint(__name__, 1, "Audio: boost {0}", audio['boost'])
args = dict()
args['session'] = UDID
args['protocol'] = 'hls'
args['videoResolution'] = vRes
args['maxVideoBitrate'] = mVB
args['videoQuality'] = vQ
args['directStream'] = '0' if action=='Transcode' else '1'
    # 'directPlay' - handled by the client in MEDIAURL()
args['subtitleSize'] = subtitle['size']
args['skipSubtitles'] = subtitle['dontBurnIn'] #'1' # shut off PMS subtitles. Todo: skip only for aTV native/SRT (or other supported)
args['audioBoost'] = audio['boost']
args['fastSeek'] = '1'
args['path'] = path
args['partIndex'] = partIndex
xargs = getXArgsDeviceInfo(options)
xargs['X-Plex-Client-Capabilities'] = "protocols=http-live-streaming,http-mp4-streaming,http-streaming-video,http-streaming-video-720p,http-mp4-video,http-mp4-video-720p;videoDecoders=h264{profile:high&resolution:1080&level:41};audioDecoders=mp3,aac{bitrate:160000}"
if not AuthToken=='':
xargs['X-Plex-Token'] = AuthToken
return transcodePath + urlencode(args) + '&' + urlencode(xargs)
"""
Direct Video Play support
parameters:
path
AuthToken
Indirect - media indirect specified, grab child XML to gain real path
options
result:
final path to media file
"""
def getDirectVideoPath(key, AuthToken):
if key.startswith('http://') or key.startswith('https://'): # external address - keep
path = key
else:
if AuthToken=='':
path = key
else:
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if key.find('?')==-1:
path = key + '?' + urlencode(xargs)
else:
path = key + '&' + urlencode(xargs)
return path
"""
Transcode Image support
parameters:
key
AuthToken
path - source path of current XML: path[srcXML]
width
height
result:
final path to image file
"""
def getTranscodeImagePath(key, AuthToken, path, width, height):
if key.startswith('http://') or key.startswith('https://'): # external address - can we get a transcoding request for external images?
path = key
elif key.startswith('/'): # internal full path.
path = 'http://127.0.0.1:32400' + key
else: # internal path, add-on
path = 'http://127.0.0.1:32400' + path + '/' + key
path = path.encode('utf8')
# This is bogus (note the extra path component) but ATV is stupid when it comes to caching images, it doesn't use querystrings.
# Fortunately PMS is lenient...
transcodePath = '/photo/:/transcode/' +str(width)+'x'+str(height)+ '/' + quote_plus(path)
args = dict()
args['width'] = width
args['height'] = height
args['url'] = path
if not AuthToken=='':
args['X-Plex-Token'] = AuthToken
return transcodePath + '?' + urlencode(args)
"""
Direct Image support
parameters:
path
AuthToken
result:
final path to image file
"""
def getDirectImagePath(path, AuthToken):
if not AuthToken=='':
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if path.find('?')==-1:
path = path + '?' + urlencode(xargs)
else:
path = path + '&' + urlencode(xargs)
return path
"""
Transcode Audio support
parameters:
path
AuthToken
options - dict() of PlexConnect-options as received from aTV
maxAudioBitrate - [kbps]
result:
final path to pull in PMS transcoder
"""
def getTranscodeAudioPath(path, AuthToken, options, maxAudioBitrate):
UDID = options['PlexConnectUDID']
transcodePath = '/music/:/transcode/universal/start.mp3?'
args = dict()
args['path'] = path
args['session'] = UDID
args['protocol'] = 'http'
args['maxAudioBitrate'] = maxAudioBitrate
xargs = getXArgsDeviceInfo(options)
if not AuthToken=='':
xargs['X-Plex-Token'] = AuthToken
return transcodePath + urlencode(args) + '&' + urlencode(xargs)
"""
Direct Audio support
parameters:
path
AuthToken
result:
final path to audio file
"""
def getDirectAudioPath(path, AuthToken):
if not AuthToken=='':
xargs = dict()
xargs['X-Plex-Token'] = AuthToken
if path.find('?')==-1:
path = path + '?' + urlencode(xargs)
else:
path = path + '&' + urlencode(xargs)
return path
if __name__ == '__main__':
testPlexGDM = 0
testLocalPMS = 0
testSectionXML = 1
testMyPlexXML = 0
testMyPlexSignIn = 0
testMyPlexSignOut = 0
username = 'abc'
password = 'def'
token = 'xyz'
# test PlexGDM
if testPlexGDM:
dprint('', 0, "*** PlexGDM")
PMS_list = PlexGDM()
dprint('', 0, PMS_list)
# test XML from local PMS
if testLocalPMS:
dprint('', 0, "*** XML from local PMS")
XML = getXMLFromPMS('http://127.0.0.1:32400', '/library/sections')
# test local Server/Sections
    if testSectionXML:
        dprint('', 0, "*** local Server/Sections")
        PMS_list = PlexGDM()
        # getSectionXML() no longer exists - register the discovered servers, then combine their section XMLs
        g_PMS['test'] = {}
        for uuid in PMS_list:
            declarePMS('test', uuid, PMS_list[uuid]['serverName'], 'http', PMS_list[uuid]['ip'], PMS_list[uuid]['port'])
        XML = getXMLFromMultiplePMS('test', '/library/sections', 'all')
# test XML from MyPlex
if testMyPlexXML:
dprint('', 0, "*** XML from MyPlex")
XML = getXMLFromPMS('https://plex.tv', '/pms/servers', None, token)
XML = getXMLFromPMS('https://plex.tv', '/pms/system/library/sections', None, token)
# test MyPlex Sign In
if testMyPlexSignIn:
dprint('', 0, "*** MyPlex Sign In")
options = {'PlexConnectUDID':'007'}
(user, token) = MyPlexSignIn(username, password, options)
if user=='' and token=='':
dprint('', 0, "Authentication failed")
else:
dprint('', 0, "logged in: {0}, {1}", user, token)
# test MyPlex Sign out
if testMyPlexSignOut:
dprint('', 0, "*** MyPlex Sign Out")
MyPlexSignOut(token)
dprint('', 0, "logged out")
# test transcoder
|
bot.py
|
# AutoWaifuClaimer
# Copyright (C) 2020 RandomBananazz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from asyncio import TimeoutError
import discord
import sys
import re
from concurrent.futures import ThreadPoolExecutor
import threading
import logging
import datetime
import aiohttp
import config
from browsers import Browser
from timers import Timer
# noinspection PyArgumentList
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO,
handlers=[
logging.FileHandler(config.LOG_FILE, 'a', 'utf-8'),
logging.StreamHandler(sys.stdout)
])
# Initialize Selenium browser integration in separate module
browser = Browser()
# Declare global timer module
timer: Timer
# Main thread and Discord bot integration here
client = discord.Client()
main_user: discord.User
dm_channel: discord.DMChannel
roll_channel: discord.TextChannel
mudae: discord.Member
ready = False
# To be parsed by $tu and used for the auto roller
timing_info = {
'claim_reset': None,
'claim_available': None,
'rolls_reset': None,
'kakera_available': None,
'kakera_reset': None,
'daily_reset': None
}
async def close_bot():
await client.close()
client.loop.stop()
client.loop.close()
sys.exit()
@client.event
async def on_ready():
# Ensure the browser is ready before proceeding (blocking call)
try:
browser_login.result()
    except (TimeoutError, ValueError):  # 'or' would only ever catch TimeoutError
await close_bot()
def parse_tu(message):
global timing_info
if message.channel != roll_channel or message.author != mudae: return
match = re.search(r"""^.*?(\w+).*? # Group 1: Username
(can't|can).*? # Group 2: Claim available
(\d+(?:h\ \d+)?)(?=\*\*\ min).*? # Group 3: Claim reset
(\d+(?:h\ \d+)?)(?=\*\*\ min).*? # Group 4: Rolls reset
(?<=\$daily).*?(available|\d+h\ \d+).*? # Group 5: $daily reset
(can't|can).*?(?=react).*? # Group 6: Kakera available
(?:(\d+(?:h\ \d+)?)(?=\*\*\ min)|(now)).*? # Group 7: Kakera reset
(?<=\$dk).*?(ready|\d+h\ \d+) # Group 8: $dk reset
.*$ # End of string
""", message.content, re.DOTALL | re.VERBOSE)
if not match: return
if match.group(1) != main_user.name: return
# Convert __h __ to minutes
times = []
for x in [match.group(3), match.group(4), match.group(5), match.group(7)]:
# Specifically, group 7 may be None if kakera is ready
if x is None:
x = 0
elif 'h ' in x:
x = x.split('h ')
x = int(x[0]) * 60 + int(x[1])
elif x == 'ready' or x == 'now':
x = 0
else:
x = int(x)
times.append(x)
kakera_available = match.group(6) == 'can'
claim_available = match.group(2) == 'can'
timing_info = {
'claim_reset': datetime.datetime.now() + datetime.timedelta(minutes=times[0]),
'claim_available': claim_available,
'rolls_reset': datetime.datetime.now() + datetime.timedelta(minutes=times[1]),
'kakera_available': kakera_available,
'kakera_reset': datetime.datetime.now() + datetime.timedelta(minutes=times[3]),
'daily_reset': datetime.datetime.now() + datetime.timedelta(minutes=times[2]),
}
return True
global main_user, mudae, dm_channel, roll_channel, timer, timing_info, ready
logging.info(f'Bot connected as {client.user.name} with ID {client.user.id}')
main_user = await client.fetch_user(config.USER_ID)
dm_channel = await main_user.create_dm()
roll_channel = await client.fetch_channel(config.CHANNEL_ID)
mudae = await client.fetch_user(config.MUDAE_ID)
# Parse timers by sending $tu command
# Only do so once by checking ready property
if not ready:
logging.info('Attempting to parse $tu command')
pool.submit(Browser.send_text, browser, f'{config.COMMAND_PREFIX}tu')
try:
await client.wait_for('message', check=parse_tu, timeout=3)
except TimeoutError:
logging.critical('Could not parse $tu command, quitting (try again)')
browser.close()
await close_bot()
else:
logging.info('$tu command parsed')
logging.info('Creating new Timer based on parsed information')
timer = Timer(browser, timing_info["claim_reset"], timing_info["rolls_reset"], timing_info["daily_reset"],
timing_info['claim_available'], timing_info["kakera_reset"], timing_info["kakera_available"])
if config.DAILY_DURATION > 0:
threading.Thread(name='daily', target=timer.wait_for_daily).start()
if config.ROLL_DURATION > 0:
threading.Thread(name='roll', target=timer.wait_for_roll).start()
threading.Thread(name='claim', target=timer.wait_for_claim).start()
threading.Thread(name='kakera', target=timer.wait_for_kakera).start()
# For some reason, browser Discord crashes sometime at this point
# Refresh the page to fix
browser.refresh() # Blocking call
logging.info("Listener is ready")
ready = True
@client.event
async def on_message(message):
def parse_embed():
# Regex based parsing adapted from the EzMudae module by Znunu
# https://github.com/Znunu/EzMudae
desc = embed.description
name = embed.author.name
series = None
owner = None
key = False
# Get series and key value if present
match = re.search(r'^(.*?[^<]*)(?:<:(\w+key))?', desc, re.DOTALL)
if match:
series = match.group(1).replace('\n', ' ').strip()
            if match.group(2):  # len(match.groups()) is always 2 for this pattern; check whether the optional key group matched
                key = match.group(2)
# Check if it was a roll
        # Look for any digits right after a '*' - if present, the embed is not treated as a roll
match = re.search(r'(?<=\*)(\d+)', desc, re.DOTALL)
if match: return
# Check if valid parse
if not series: return
# Get owner if present
if not embed.footer.text:
is_claimed = False
else:
match = re.search(r'(?<=Belongs to )\w+', embed.footer.text, re.DOTALL)
if match:
is_claimed = True
owner = match.group(0)
else:
is_claimed = False
# Log in roll list and console/logfile
with open('./data/rolled.txt', 'a') as f:
f.write(f'{datetime.datetime.now()} {name} - {series}\n')
logging.info(f'Parsed roll: {name} - {series} - Claimed: {is_claimed}')
return {'name': name,
'series': series,
'is_claimed': is_claimed,
'owner': owner,
'key': key}
def reaction_check(payload):
# Return if reaction message or author incorrect
if payload.message_id != message.id: return
if payload.user_id != mudae.id: return
# Open thread to click emoji
emoji = payload.emoji
pool.submit(browser.react_emoji, emoji.name, message.id)
return True
## BEGIN ON_MESSAGE BELOW ##
global main_user, mudae, dm_channel, roll_channel, ready
if not ready: return
# Only parse messages from the bot in the right channel that contain a valid embed
if message.channel != roll_channel or message.author != mudae or not len(message.embeds) == 1 or \
message.embeds[0].image.url == message.embeds[0].Empty: return
embed = message.embeds[0]
if not (waifu_result := parse_embed()): return # Return if parsing failed
# If unclaimed waifu was on likelist
if waifu_result['name'] in like_array and not waifu_result['is_claimed']:
if not timer.get_claim_availability(): # No claim is available
logging.warning(f'Character {waifu_result["name"]} was on the likelist but no claim was available!')
await dm_channel.send(content=f"Character {waifu_result['name']} was on the likelist"
f"but no claim was available!", embed=embed)
return
logging.info(f'Character {waifu_result["name"]} in likelist, attempting marry')
# New Mudae bot does not automatically add emojis, just react.
pool.submit(browser.react_emoji, "❤", message.id)
"""
try:
await client.wait_for('raw_reaction_add', check=reaction_check, timeout=3)
except TimeoutError:
logging.critical('Marry failed, could not detect bot reaction')
return
else:
await dm_channel.send(content=f"Marry attempted for {waifu_result['name']}", embed=embed)
timer.set_claim_availability(False)
"""
# If key was rolled
if waifu_result['owner'] == main_user.name and waifu_result['key']:
await dm_channel.send(content=f"{waifu_result['key']} rolled for {waifu_result['name']}", embed=embed)
# If kakera loot available
if waifu_result['is_claimed']:
if not timer.get_kakera_availablilty():
logging.warning(f'Character {waifu_result["name"]} has kakera loot but the loot was not available!')
await dm_channel.send(content=f"Character {waifu_result['name']} had kakera loot"
f" but no loot was available!", embed=embed)
return
logging.info('Attempting to loot kakera')
try:
await client.wait_for('raw_reaction_add', check=reaction_check, timeout=3)
except TimeoutError:
logging.critical('Kakera loot failed, could not detect bot reaction')
return
else:
await dm_channel.send(content=f"Kakera loot attempted for {waifu_result['name']}", embed=embed)
timer.set_kakera_availability(False)
if __name__ == '__main__':
with open('./data/likelist.txt', 'r') as f:
logging.info('Parsing likelist')
like_array = [x.strip() for x in [x for x in f.readlines() if not x.startswith('\n')] if not x.startswith('#')]
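    # ./data/likelist.txt format, as implied by the parsing above: one character name per
    # line; blank lines and lines starting with '#' are ignored, e.g.
    #   # favourites
    #   Rem
    #   Megumin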
pool = ThreadPoolExecutor()
try:
logging.info('Starting browser thread')
browser_login = pool.submit(Browser.browser_login, browser)
client.loop.run_until_complete(client.start(config.BOT_TOKEN))
except KeyboardInterrupt:
logging.critical("Keyboard interrupt, quitting")
client.loop.run_until_complete(client.logout())
    except (discord.LoginFailure, aiohttp.ClientConnectorError):  # 'or' would only ever catch LoginFailure
        logging.critical("Improper token has been passed or connection to Discord failed, quitting")
finally:
browser.close()
client.loop.stop()
client.loop.close()
|
zmq_socket_tests.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from multiprocessing import Process
import zmq
from openr.Lsdb import ttypes as lsdb_types
from openr.utils import zmq_socket
class TestSocket(unittest.TestCase):
def test_req_rep(self):
zmq_ctx = zmq.Context()
rep_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
rep_socket.bind("inproc://req_rep_test")
req_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REQ)
req_socket.connect("inproc://req_rep_test")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
req_socket.send_thrift_obj(thrift_obj)
recv_obj = rep_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
rep_socket.send_thrift_obj(recv_obj)
recv_obj = req_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_pub_sub(self):
zmq_ctx = zmq.Context()
pub_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.PUB)
pub_socket.bind("inproc://req_rep_test")
sub_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.SUB)
sub_socket.connect("inproc://req_rep_test")
sub_socket.set_sock_opt(zmq.SUBSCRIBE, b"")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
pub_socket.send_thrift_obj(thrift_obj)
recv_obj = sub_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_dealer_dealer(self):
zmq_ctx = zmq.Context()
d_socket_1 = zmq_socket.ZmqSocket(zmq_ctx, zmq.DEALER)
d_socket_1.bind("inproc://dealer_test")
d_socket_2 = zmq_socket.ZmqSocket(zmq_ctx, zmq.DEALER)
d_socket_2.connect("inproc://dealer_test")
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
d_socket_1.send_thrift_obj(thrift_obj)
recv_obj = d_socket_2.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
d_socket_2.send_thrift_obj(recv_obj)
recv_obj = d_socket_1.recv_thrift_obj(lsdb_types.PrefixDatabase)
self.assertEqual(thrift_obj, recv_obj)
def test_status_conflicts(self):
zmq_ctx = zmq.Context()
bind_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
bind_socket.bind("inproc://status_test")
with self.assertRaises(Exception):
bind_socket.connect("inproc://status_test")
connect_socket = zmq_socket.ZmqSocket(zmq_ctx, zmq.REP)
connect_socket.connect("inproc://status_test")
with self.assertRaises(Exception):
connect_socket.bind("inproc://status_test")
def test_in_multi_processes(self):
thrift_obj = lsdb_types.PrefixDatabase()
thrift_obj.thisNodeName = "some node"
def _send_recv():
req_socket = zmq_socket.ZmqSocket(zmq.Context(), zmq.REQ)
req_socket.connect("tcp://localhost:5000")
req_socket.send_thrift_obj(thrift_obj)
print("request sent")
recv_obj = req_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
print("reply received")
self.assertEqual(thrift_obj, recv_obj)
def _recv_send():
rep_socket = zmq_socket.ZmqSocket(zmq.Context(), zmq.REP)
rep_socket.bind("tcp://*:5000")
recv_obj = rep_socket.recv_thrift_obj(lsdb_types.PrefixDatabase)
print("request received")
self.assertEqual(thrift_obj, recv_obj)
rep_socket.send_thrift_obj(recv_obj)
print("reply sent")
q = Process(target=_recv_send)
q.start()
p = Process(target=_send_recv)
p.start()
p.join()
q.join()
|
vchiq-server.py
|
import sys, time, socket, threading, pickle
#Crude Mock-up of VCHIQ Server in Python
#VCHIQ is Inter-Silicon Remote Procedure Call using a Vector Table
#and is the mechanism used for the ARM to communicate with the VC4 GPU
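# Minimal matching client, shown for illustration only (not part of this file):
#   import socket, pickle
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.sendto("[VCHIQ]: ".encode() + pickle.dumps(server_say_hello), ("127.0.0.1", 80))
# pickle serialises the callable by reference, so the name must resolve to the same
# function on the server side; datagrams without the header are just printed.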
def server_say_hello():
print("hello")
def server_say_world():
print("world")
def main():
print("[VCHIQ_Server]")
server = vchiq()
class vchiq:
def __init__(self):
self.vector = []
self.max = 255
self.header = "[VCHIQ]: "
self.socket = None
self.address = "127.0.0.1"
self.port = 80
self.mtu = 1500
self.reconnect()
self.thread = threading.Thread(target = self.loop)
self.thread.start()
def reconnect(self):
print("Reconnecting...")
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((self.address, self.port))
def loop(self):
while True:
try:
msg = self.socket.recv(self.mtu)
header = msg[:len(self.header)].decode()
code = msg[len(self.header):]
if (header == self.header):
obj = pickle.loads(code)
obj()
else:
print(msg)
except Exception as error:
print(error, file = sys.stderr)
self.reconnect()
if __name__ == "__main__":
main()
|
replay.py
|
import curses
import json
import logging
import logging.config
from threading import Thread
from typing import List, Optional, cast
from scrabble.game import GameState
from scrabble.game.api import Event, GameInitEvent, GameStartEvent, PlayerAddLettersEvent, PlayerMoveEvent
from scrabble.gui.window import CallbackConfig, Window
from scrabble.serializers.game.api import EventSchema
from scrabble.settings import REPLAY_LOGGING_CONFIG
__all__ = [
'ReplayEngine',
]
class ReplayEngine:
def __init__(self, game_id: int, events_filepath: str, player: str, sequence: Optional[int] = None) -> None:
logging.config.dictConfig(REPLAY_LOGGING_CONFIG)
self._logger = logging.getLogger()
self._game_id = game_id
self._events_filepath = events_filepath
self._player = player
self._sequence = sequence
self._window = Window(self._player, CallbackConfig(on_player_move=self._on_player_move))
self._file_events: List[Event] = []
self._events: List[Event] = []
@property
def game_state(self) -> GameState:
return GameState(self._game_id, events=self._events)
def _on_player_move(self, *args, **kwargs) -> None:
...
def _apply_event(self, event: Event) -> None:
try:
self.game_state.apply_event(event)
except Exception:
self._logger.exception(f'Error applying event {event}')
else:
self._events.append(event)
self._gui_apply_event(event)
def _load_events(self, events_filepath: str) -> None:
try:
with open(events_filepath, 'r') as fin:
serialized_events = json.load(fin)
self._file_events = [EventSchema().load(event) for event in serialized_events]
except FileNotFoundError:
raise RuntimeError('Cannot find the game')
def _run_gui(self) -> None:
curses.wrapper(self._window.run)
def _gui_apply_event(self, event: Event) -> None:
if isinstance(event, GameInitEvent):
self._gui_apply__game_init(event)
elif isinstance(event, GameStartEvent):
self._gui_apply__game_start(event)
elif isinstance(event, PlayerAddLettersEvent):
self._gui_apply__player_add_letters(event)
elif isinstance(event, PlayerMoveEvent):
self._gui_apply__player_move(event)
else:
raise ValueError(f'Unknown event {event}')
def _gui_apply__game_init(self, event: GameInitEvent) -> None:
self._window.set_language(event.params.lang)
for player in event.params.players:
self._window.add_player(player)
init_word = event.params.board_settings.init_word
if init_word is not None:
self._window.add_grid_words([
(init_word.start_x, init_word.start_y, init_word.word, init_word.direction.value),
])
for bonus in event.params.board_settings.bonuses:
self._window.add_bonus(bonus.location_x, bonus.location_y, bonus.multiplier)
def _gui_apply__game_start(self, event: GameStartEvent) -> None:
self._window.set_player_turn(cast(str, self.game_state.player_to_move))
def _gui_apply__player_add_letters(self, event: PlayerAddLettersEvent) -> None:
if self._player == event.params.player:
self._window.update_player_letters(self.game_state.get_player_state(self._player).letters)
def _gui_apply__player_move(self, event: PlayerMoveEvent) -> None:
added_words = [
(word.start_x, word.start_y, word.word, word.direction.value)
for word in event.params.words
]
self._window.add_grid_words(added_words)
score = self.game_state.get_player_score(event.params.player)
self._window.update_player_score(event.params.player, score)
self._window.set_player_turn(cast(str, self.game_state.player_to_move))
def run(self) -> None:
gui_thread = Thread(target=self._run_gui)
gui_thread.start()
self._load_events(self._events_filepath)
for event in self._file_events:
if self._sequence is None or event.sequence <= self._sequence:
self._apply_event(event)
gui_thread.join()
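# Illustrative usage - ids and paths are placeholders:
#   ReplayEngine(game_id=1, events_filepath='games/1/events.json', player='alice', sequence=42).run()
# replays the stored events up to sequence 42 in the curses GUI.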
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"coco":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [-1, -1, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3]}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34_tf, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3]}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overridden from the command line
DEFAULT_LATENCY_BUCKETS = "0.010,0.050,0.100"
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"queries-single": 1024,
"queries-multi": 24576,
"max-latency": DEFAULT_LATENCY_BUCKETS,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NHWC",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
},
}
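# Illustrative invocation - model and dataset paths are placeholders:
#   python main.py --profile resnet50-tf --model resnet50_v1.pb \
#       --dataset-path /data/imagenet --output results.json --accuracy
# a profile only fills in defaults; any flag given explicitly on the command line still wins.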
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, list of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--queries-single", type=int, default=1024,
help="mlperf number of queries for SingleStream")
parser.add_argument("--queries-offline", type=int, default=24576,
help="mlperf number of queries for Offline")
parser.add_argument("--queries-multi", type=int, default=24576,
help="mlperf number of queries for MultiStream,Server")
parser.add_argument("--max-batchsize", type=int, default=128,
help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--qps", type=int, default=10, help="target qps estimate")
parser.add_argument("--max-latency", type=str, help="mlperf max latency in 99pct tile")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile
    # and take this as the default unless the command line gives a value
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.max_latency:
args.max_latency = [float(i) for i in args.max_latency.split(",")]
try:
args.scenario = [SCENARIO_MAP[scenario] for scenario in args.scenario.split(",")]
    except KeyError:
        parser.error("valid scenarios: " + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
# None in the queue indicates the parent want us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.2f}".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = result_dict["mAP"]
acc_str += ", mAP={:.2f}".format(result_dict["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.2f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
    # for testing. In performance mode we always limit count to 200.
count = args.count
if not count:
if not args.accuracy:
count = 200
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
for scenario in args.scenario:
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries(): pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if scenario == lg.TestScenario.SingleStream:
settings.min_query_count = args.queries_single
settings.max_query_count = args.queries_single
elif scenario == lg.TestScenario.MultiStream:
settings.min_query_count = args.queries_multi
settings.max_query_count = args.queries_multi
settings.multi_stream_samples_per_query = 4
elif scenario == lg.TestScenario.Server:
max_latency = args.max_latency
elif scenario == lg.TestScenario.Offline:
settings.min_query_count = args.queries_offline
settings.max_query_count = args.queries_offline
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 1000), ds.load_query_samples, ds.unload_query_samples)
if scenario == lg.TestScenario.Server:
for target_latency in max_latency:
log.info("starting {}, latency={}".format(scenario, target_latency))
settings.server_target_latency_ns = int(target_latency * NANO_SEC)
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=os.path.dirname(args.output))
add_results(final_results, "{}-{}".format(scenario, target_latency),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
else:
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=os.path.dirname(args.output))
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open(args.output, "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
client.py
|
import os
import sys
import socket
import threading
import platform
from Crypto.Cipher import AES
server_udp = ('127.0.0.1', 5671)
server_tcp = ('127.0.0.1', 5572)
obj = AES.new(b'This is a key123', AES.MODE_CBC, b'This is an IV456')  # key/IV must be bytes under Python 3
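# Assumed wire format, derived from the receive loops below: the peer sends the file as
# AES-CBC encrypted chunks, null-padded to the 16-byte block size with the same key/IV
# as above; an empty recv marks end of file. E.g. padding a chunk before encryption:
#   padded = chunk + b'\0' * (-len(chunk) % 16)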
def udp_request(number):
print('requesting udp file')
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
my_socket.bind(('127.0.0.1',0))
message = 'here'
try:
my_socket.sendto(bytes(message,'utf-8'), server_udp)
print('sent packet handshaking thread number', number)
except:
print('something went wrong in handshaking thread number',number)
with open(str(number)+'_udp.txt','wb') as f:
while True: # receiving the file
buff , address= my_socket.recvfrom(1024)
print(buff)
#print(str('buff'))
if buff == b'':
print('file completed')
break
new_buff = obj.decrypt(buff)
new_buff = new_buff.split(b'\0',1)[0]
f.write(new_buff)
print('ending udp thread number', number)
my_socket.close()
def tcp_request(number):
print('requesting tcp file')
my_socket = socket.socket()
my_socket.bind(('', 0)) # 0 means any port available
print('listening on port:', my_socket.getsockname()[1])
try:
my_socket.connect(server_tcp)
except:
print("couldn't connect to server socket")
return
file_size = int(str(my_socket.recv(25).decode('utf-8')))
print('file size is for thread', number, '=', file_size)
file_name = str(number)+'_tcp.txt'
total_received = 0
with open(file_name,'wb') as f:
while True:
            try:
                data = my_socket.recv(1024)
                if data == b'':
                    break
                # decrypt each chunk exactly once - the CBC cipher object is stateful,
                # so calling decrypt() twice on the same data would corrupt the stream
                decrypted = obj.decrypt(data)
                total_received += len(decrypted)
                print('thread number', number, 'download complete', total_received/file_size*100)
                f.write(decrypted.split(b'\0', 1)[0])
            except:
                my_socket.close()
                print('something went wrong in receiving')
                break
print('file successfully received in thread ', number)
my_socket.close()
def main():
tcp_count = int(input("Enter number of tcp -> "))
udp_count = int(input("Enter number of udp -> "))
print('creating tcp threads')
threads = []
for i in range(1, tcp_count+1):
t = threading.Thread(target=tcp_request, args=(i,))
t.start()
threads.append(t)
print('creating udp threads')
for i in range(1, udp_count+1):
t = threading.Thread(target=udp_request, args=(i,))
t.start()
threads.append(t)
order = input('self destruct process on your click')
print('terminating threads')
#for t in threads:
#t.terminate()
print('everything is put to ashes')
if __name__ == '__main__':
main()
|
thread2.py
|
#!/usr/bin/python -u
import string, sys, time
import thread
from threading import Thread, Lock
import libxml2
THREADS_COUNT = 15
failed = 0
class ErrorHandler:
def __init__(self):
self.errors = []
self.lock = Lock()
def handler(self,ctx,str):
self.lock.acquire()
self.errors.append(str)
self.lock.release()
def getLineNumbersDefault():
old = libxml2.lineNumbersDefault(0)
libxml2.lineNumbersDefault(old)
return old
def test(expectedLineNumbersDefault):
time.sleep(1)
global failed
# check a per thread-global
if expectedLineNumbersDefault != getLineNumbersDefault():
failed = 1
print "FAILED to obtain correct value for " \
"lineNumbersDefault in thread %d" % thread.get_ident()
    # check the global error handler
# (which is NOT per-thread in the python bindings)
try:
doc = libxml2.parseFile("bad.xml")
except:
pass
else:
assert "failed"
# global error handler
eh = ErrorHandler()
libxml2.registerErrorHandler(eh.handler,"")
# set on the main thread only
libxml2.lineNumbersDefault(1)
test(1)
ec = len(eh.errors)
if ec == 0:
print "FAILED: should have obtained errors"
sys.exit(1)
ts = []
for i in range(THREADS_COUNT):
# expect 0 for lineNumbersDefault because
# the new value has been set on the main thread only
ts.append(Thread(target=test,args=(0,)))
for t in ts:
t.start()
for t in ts:
t.join()
if len(eh.errors) != ec+THREADS_COUNT*ec:
print "FAILED: did not obtain the correct number of errors"
sys.exit(1)
# set lineNumbersDefault for future new threads
libxml2.thrDefLineNumbersDefaultValue(1)
ts = []
for i in range(THREADS_COUNT):
# expect 1 for lineNumbersDefault
ts.append(Thread(target=test,args=(1,)))
for t in ts:
t.start()
for t in ts:
t.join()
if len(eh.errors) != ec+THREADS_COUNT*ec*2:
print "FAILED: did not obtain the correct number of errors"
sys.exit(1)
if failed:
print "FAILED"
sys.exit(1)
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
client.py
|
from ..libs import websocket
from ..types import TD_ViewSnapshot, ListenerEvent
from typing import Optional, Protocol
import sublime
import threading
class TransportCallbacks(Protocol):
def on_open(self, ws: websocket.WebSocketApp) -> None:
"""Called when connected to the websocket."""
...
def on_message(self, ws: websocket.WebSocketApp, message: str) -> None:
"""Called when received a message from the websocket."""
...
def on_error(self, ws: websocket.WebSocketApp, error: str) -> None:
"""Called when there is an exception occurred in the websocket."""
...
def on_close(self, ws: websocket.WebSocketApp, close_status_code: int, close_msg: str) -> None:
"""Called when disconnected from the websocket."""
...
class NullTransportCallbacks:
on_open = None
on_message = None
on_error = None
on_close = None
class GuesslangClient:
def __init__(
self,
host: str,
port: int,
*,
callback_object: Optional[TransportCallbacks] = None,
) -> None:
self.host = host
self.port = port
self.callback_object = callback_object or NullTransportCallbacks()
self.ws: Optional[websocket.WebSocketApp] = None
self._start_client_thread()
def __del__(self) -> None:
if self.ws:
self.ws.close()
def _start_client_thread(self) -> None:
def _worker(client: GuesslangClient) -> None:
client.ws = websocket.WebSocketApp(
f"ws://{client.host}:{client.port}",
on_open=client.callback_object.on_open,
on_message=client.callback_object.on_message,
on_error=client.callback_object.on_error,
on_close=client.callback_object.on_close,
)
client.ws.run_forever()
# websocket.enableTrace(True)
self.thread = threading.Thread(target=_worker, args=(self,))
self.thread.start()
@staticmethod
def is_connected(ws: websocket.WebSocketApp) -> bool:
return ws.sock is not None
def request_guess_snapshot(
self,
view_info: TD_ViewSnapshot,
*,
model: str = "",
event: Optional[ListenerEvent] = None,
) -> None:
if self.ws and self.is_connected(self.ws):
self.ws.send(
sublime.encode_value(
{
"id": view_info["id"],
"model": model,
"content": view_info["content"],
"event_name": event.value if event else None,
}
)
)
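# A minimal sketch (not part of the original module) of how a caller might hook into
# GuesslangClient: callback_object only needs to satisfy the TransportCallbacks protocol
# above. The MyCallbacks name and the host/port values are illustrative assumptions.
#
# class MyCallbacks:
#     def on_open(self, ws): print("connected")
#     def on_message(self, ws, message): print("guess:", message)
#     def on_error(self, ws, error): print("error:", error)
#     def on_close(self, ws, code, msg): print("closed")
#
# client = GuesslangClient("localhost", 9981, callback_object=MyCallbacks())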
|
waterrowerinterface.py
|
# ---------------------------------------------------------------------------
# Original code from the bfritscher Repo waterrower
# https://github.com/bfritscher/waterrower
# ---------------------------------------------------------------------------
#
# -*- coding: utf-8 -*-
import threading
import logging
import time
import serial
import serial.tools.list_ports
logger = logging.getLogger(__name__)
MEMORY_MAP = {'055': {'type': 'total_distance_m', 'size': 'double', 'base': 16},
'140': {'type': 'total_strokes', 'size': 'double', 'base': 16},
'088': {'type': 'watts', 'size': 'double', 'base': 16},
'08A': {'type': 'total_kcal', 'size': 'triple', 'base': 16},
'14A': {'type': 'avg_distance_cmps', 'size': 'double', 'base': 16},
'148': {'type': 'total_speed_cmps', 'size': 'double', 'base': 16},
'1E0': {'type': 'display_sec_dec', 'size': 'single', 'base': 10},
'1E1': {'type': 'display_sec', 'size': 'single', 'base': 10},
'1E2': {'type': 'display_min', 'size': 'single', 'base': 10},
'1E3': {'type': 'display_hr', 'size': 'single', 'base': 10},
# from zone math
'1A0': {'type': 'heart_rate', 'size': 'double', 'base': 16},
'1A6': {'type': '500mps', 'size': 'double', 'base': 16},
'1A9': {'type': 'stroke_rate', 'size': 'single', 'base': 16},
# explore
'142': {'type': 'avg_time_stroke_whole', 'size': 'single', 'base': 16},
'143': {'type': 'avg_time_stroke_pull', 'size': 'single', 'base': 16},
#other
'0A9': {'type': 'tank_volume', 'size': 'single', 'base': 16, 'not_in_loop': True},
}
# ACH values = Ascii coded hexadecimal
# REQUEST sent from PC to device
# RESPONSE sent from device to PC
USB_REQUEST = "USB" # Application starting communication’s
WR_RESPONSE = "_WR_" # Hardware Type, Accept USB start sending packets
EXIT_REQUEST = "EXIT" # Application is exiting, stop sending packets
OK_RESPONSE = "OK" # Packet Accepted
ERROR_RESPONSE = "ERROR" # Unknown packet
PING_RESPONSE = "PING" # Ping
RESET_REQUEST = "RESET" # Request the rowing computer to reset, disable interactive mode
MODEL_INFORMATION_REQUEST = "IV?" # Request Model Information
MODEL_INFORMATION_RESPONSE = "IV" # Current model information IV+Model+Version High+Version Low
READ_MEMORY_REQUEST = "IR" # Read a memory location IR+(S=Single,D=Double,T=Triple) + XXX
READ_MEMORY_RESPONSE = "ID" # Value from a memory location ID +(type) + Y3 Y2 Y1
STROKE_START_RESPONSE = "SS" # Start of stroke
STROKE_END_RESPONSE = "SE" # End of stroke
PULSE_COUNT_RESPONSE = "P" # Pulse Count XX in the last 25mS, ACH value
# Display Settings (not used)
DISPLAY_SET_INTENSITY_MPS_REQUEST = "DIMS"
DISPLAY_SET_INTENSITY_MPH_REQUEST = "DIMPH"
DISPLAY_SET_INTENSITY_500M_REQUEST = "DI500"
DISPLAY_SET_INTENSITY_2KM_REQUEST = "DI2KM"
DISPLAY_SET_INTENSITY_WATTS_REQUEST = "DIWA"
DISPLAY_SET_INTENSITY_CALHR_REQUEST = "DICH"
DISPLAY_SET_INTENSITY_AVG_MPS_REQUEST = "DAMS"
DISPLAY_SET_INTENSITY_AVG_MPH_REQUEST = "DAMPH"
DISPLAY_SET_INTENSITY_AVG_500M_REQUEST = "DA500"
DISPLAY_SET_INTENSITY_AVG_2KM_REQUEST = "DA2KM"
DISPLAY_SET_DISTANCE_METERS_REQUEST = "DDME"
DISPLAY_SET_DISTANCE_MILES_REQUEST = "DDMI"
DISPLAY_SET_DISTANCE_KM_REQUEST = "DDKM"
DISPLAY_SET_DISTANCE_STROKES_REQUEST = "DDST"
# Interactive mode
INTERACTIVE_MODE_START_RESPONSE = "AIS" # interactive mode requested by device
INTERACTIVE_MODE_START_ACCEPT_REQUEST = "AIA" # confirm interactive mode, key input is redirect to PC
INTERACTIVE_MODE_END_REQUEST = "AIE" # cancel interactive mode
INTERACTIVE_KEYPAD_RESET_RESPONSE = "AKR" # RESET key pressed, interactive mode will be cancelled
INTERACTIVE_KEYPAD_UNITS_RESPONSE = "AK1" # Units button pressed
INTERACTIVE_KEYPAD_ZONES_RESPONSE = "AK2" # Zones button pressed
INTERACTIVE_KEYPAD_WORKOUT_RESPONSE = "AK3" # Workout button pressed
INTERACTIVE_KEYPAD_UP_RESPONSE = "AK4" # Up arrow button pressed
INTERACTIVE_KEYPAD_OK_RESPONSE = "AK5" # Ok button pressed
INTERACTIVE_KEYPAD_DOWN_RESPONSE = "AK6" # Down arrow button pressed
INTERACTIVE_KEYPAD_ADVANCED_RESPONSE = "AK7" # Advanced button pressed
INTERACTIVE_KEYPAD_STORED_RESPONSE = "AK8" # Stored Programs button pressed
INTERACTIVE_KEYPAD_HOLD_RESPONSE = "AK9" # Hold/cancel button pressed
# Workout
WORKOUT_SET_DISTANCE_REQUEST = "WSI" # Define a distance workout + x(unit, 1-4) + YYYY = ACH
WORKOUT_SET_DURATION_REQUEST = "WSU" # Define a duration workout + YYYY = ACH seconds
WORKOUT_INTERVAL_START_SET_DISTANCE_REQUEST = "WII" # Define an interval distance workout
WORKOUT_INTERVAL_START_SET_DURATION_REQUEST = "WIU" # Define an interval duration workout
WORKOUT_INTERVAL_ADD_END_REQUEST = "WIN" # Add/End an interval to a workout XXXX(==FFFFF to end) + YYYY
# UNITS
UNIT_METERS = 1
UNIT_MILES = 2
UNIT_KM = 3
UNIT_STROKES = 4
SIZE_MAP = {'single': 'IRS',
'double': 'IRD',
'triple': 'IRT',}
UNIT_MAP = {'meters': 1,
'miles': 2,
'km': 3,
'strokes': 4}
SIZE_PARSE_MAP = {'single': lambda cmd: cmd[6:8],
'double': lambda cmd: cmd[6:10],
'triple': lambda cmd: cmd[6:12]}
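# For reference (derived from the code below): Rower.request_address('055') looks up the size
# for address '055' in MEMORY_MAP ('double'), maps it through SIZE_MAP to 'IRD', and writes
# 'IRD055' to the serial port; the S4 answers with an 'ID...' packet that read_reply() parses.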
def find_port():
attempts = 0
while True:
attempts += 1
ports = serial.tools.list_ports.comports()
for (i, (path, name, _)) in enumerate(ports):
if "WR" in name:
logger.info("port found: %s" % path)
return path
#print("port not found retrying in 5s")
if ((attempts - 1) % 360) == 0: # message every ~30 minutes
logger.warning("port not found in %d attempts; retrying every 5s",
attempts)
time.sleep(5)
def build_daemon(target):
t = threading.Thread(target=target)
t.daemon = True
return t
def build_event(type, value=None, raw=None):
return {"type": type,
"value": value,
"raw": raw,
"at": int(round(time.time() * 1000))}
def is_live_thread(t):
return t and t.is_alive()
def read_reply(cmd):
address = cmd[3:6]
memory = MEMORY_MAP.get(address)
if memory:
size = memory['size']
value_fn = SIZE_PARSE_MAP.get(size, lambda cmd: None)
value = value_fn(cmd)
if value is None:
logger.error('unknown size: %s', size)
else:
return build_event(memory['type'], int(value, base=memory['base']), cmd)
else:
logger.error('cannot read reply for %s', cmd)
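# Worked example (illustrative bytes, not captured from a real device): for a reply such as
# 'IDD05512AB', read_reply() takes cmd[3:6] == '055' as the memory address, finds it in
# MEMORY_MAP ('total_distance_m', size 'double', base 16), extracts cmd[6:10] == '12AB' via
# SIZE_PARSE_MAP['double'], and returns an event whose value is int('12AB', 16) == 4779.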
def event_from(line):
try:
        cmd = line.strip()        # drop any leading/trailing whitespace and the trailing newline
        cmd = cmd.decode('utf8')  # decode the raw bytes into a str
        if cmd == STROKE_START_RESPONSE:             # "SS": start of a stroke
            return build_event(type='stroke_start', raw=cmd)
        elif cmd == STROKE_END_RESPONSE:             # "SE": end of a stroke
            return build_event(type='stroke_end', raw=cmd)
        elif cmd == OK_RESPONSE:                     # "OK": packet accepted, nothing to report
            return None
        elif cmd[:2] == MODEL_INFORMATION_RESPONSE:  # "IV...": model information reply
            return build_event(type='model', raw=cmd)
        elif cmd[:2] == READ_MEMORY_RESPONSE:        # "ID...": reply to a memory read request
            return read_reply(cmd)                   # parse the address and value into an event
        elif cmd[:4] == PING_RESPONSE:               # "PING": sent continuously while the rower is at standstill
            return build_event(type='ping', raw=cmd)
        elif cmd[:1] == PULSE_COUNT_RESPONSE:        # "Pxx": pulse count for the last 25 ms (ACH value)
            return build_event(type='pulse', raw=cmd)
        elif cmd == ERROR_RESPONSE:                  # "ERROR": unknown packet
            return build_event(type='error', raw=cmd)
        elif cmd[:2] == STROKE_START_RESPONSE:       # unexpected "SS"-prefixed payload, just log it
            print(cmd)
        else:
            return None
except Exception as e:
logger.error('could not build event for: %s %s', line, e)
class Rower(object):
def __init__(self, options=None):
self._callbacks = set()
self._stop_event = threading.Event()
self._demo = False
# if options and options.demo:
# from demo import FakeS4
# self._serial = FakeS4()
# self._demo = True
# else:
self._serial = serial.Serial()
self._serial.baudrate = 19200
self._request_thread = build_daemon(target=self.start_requesting)
self._capture_thread = build_daemon(target=self.start_capturing)
self._request_thread.start()
self._capture_thread.start()
def is_connected(self):
return self._serial.isOpen() and is_live_thread(self._request_thread) and \
is_live_thread(self._capture_thread)
def _find_serial(self):
if not self._demo:
self._serial.port = find_port()
try:
self._serial.open()
#print("serial open")
logger.info("serial open")
except serial.SerialException as e:
print("serial open error waiting")
time.sleep(5)
self._serial.close()
self._find_serial()
def open(self):
if self._serial and self._serial.isOpen():
self._serial.close()
self._find_serial()
if self._stop_event.is_set():
#print("reset threads")
logger.info("reset threads")
self._stop_event.clear()
self._request_thread = build_daemon(target=self.start_requesting)
self._capture_thread = build_daemon(target=self.start_capturing)
self._request_thread.start()
logger.info("Thread daemon _request started")
self._capture_thread.start()
logger.info("Thread daemon _capture started")
self.write(USB_REQUEST)
def close(self):
self.notify_callbacks(build_event("exit"))
if self._stop_event:
self._stop_event.set()
if self._serial and self._serial.isOpen():
self.write(EXIT_REQUEST)
time.sleep(0.1) # time for capture and request loops to stop running
self._serial.close()
def write(self, raw):
try:
self._serial.write(str.encode(raw.upper() + '\r\n'))
self._serial.flush()
except Exception as e:
print(e)
#print("Serial error try to reconnect")
logger.error("Serial error try to reconnect")
self.open()
def start_capturing(self):
while not self._stop_event.is_set():
if self._serial.isOpen():
try:
line = self._serial.readline()
event = event_from(line)
if event:
self.notify_callbacks(event)
except Exception as e:
#print("could not read %s" % e)
logger.error("could not read %s" % e)
try:
self._serial.reset_input_buffer()
except Exception as e2:
#print("could not reset_input_buffer %s" % e2)
logger.error("could not reset_input_buffer %s" % e2)
else:
self._stop_event.wait(0.1)
def start_requesting(self):
while not self._stop_event.is_set():
if self._serial.isOpen():
for address in MEMORY_MAP:
if 'not_in_loop' not in MEMORY_MAP[address]:
self.request_address(address)
self._stop_event.wait(0.025)
else:
self._stop_event.wait(0.1)
def reset_request(self):
self.write(RESET_REQUEST)
self.notify_callbacks(build_event('reset'))
logger.info("Reset requested")
def request_info(self):
self.write(MODEL_INFORMATION_REQUEST)
self.request_address('0A9')
def request_address(self, address):
size = MEMORY_MAP[address]['size']
cmd = SIZE_MAP[size]
self.write(cmd + address)
def register_callback(self, cb):
self._callbacks.add(cb)
def remove_callback(self, cb):
self._callbacks.remove(cb)
def notify_callbacks(self, event):
for cb in self._callbacks:
cb(event)
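# A minimal usage sketch (not part of the original module), assuming an S4 monitor is attached
# and its port name contains "WR" as find_port() expects; the on_event name is illustrative.
#
# def on_event(event):
#     if event['type'] == 'total_distance_m':
#         print('distance:', event['value'], 'm')
#
# rower = Rower()
# rower.register_callback(on_event)
# rower.open()   # handshakes with USB_REQUEST and starts polling the MEMORY_MAP addresses
# ...            # events arrive on the capture thread via the registered callback
# rower.close()  # sends EXIT_REQUEST and stops the request/capture threads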
|
events.py
|
import collections
import copy
import threading
import time
import six
class Listener(object):
raildriver = None
bindings = None
exc = None
interval = None
running = False
thread = None
subscribed_fields = None
current_data = None
previous_data = None
iteration = 0
special_fields = {
'!Coordinates': 'get_current_coordinates',
'!FuelLevel': 'get_current_fuel_level',
'!Gradient': 'get_current_gradient',
'!Heading': 'get_current_heading',
'!IsInTunnel': 'get_current_is_in_tunnel',
'!LocoName': 'get_loco_name',
'!Time': 'get_current_time',
}
def __init__(self, raildriver, interval=0.5):
"""
Initialize control listener. Requires raildriver.RailDriver instance.
:param raildriver: RailDriver instance
:param interval: how often to check the state of controls
"""
self.interval = interval
self.raildriver = raildriver
self.bindings = collections.defaultdict(list)
self.current_data = collections.defaultdict(lambda: None)
self.previous_data = collections.defaultdict(lambda: None)
self.subscribed_fields = []
def __getattr__(self, item):
return self.bindings[item].append
def _execute_bindings(self, type, *args, **kwargs):
for binding in self.bindings[type]:
binding(*args, **kwargs)
def _main_iteration(self):
self.iteration += 1
self.previous_data = copy.copy(self.current_data)
for field_name in self.subscribed_fields:
try:
current_value = self.raildriver.get_current_controller_value(field_name)
except ValueError:
del self.current_data[field_name]
else:
self.current_data[field_name] = current_value
if current_value != self.previous_data[field_name] and self.iteration > 1:
binding_name = 'on_{}_change'.format(field_name.lower())
self._execute_bindings(binding_name, current_value, self.previous_data[field_name])
for field_name, method_name in self.special_fields.items():
current_value = getattr(self.raildriver, method_name)()
self.current_data[field_name] = current_value
if current_value != self.previous_data[field_name] and self.iteration > 1:
binding_name = 'on_{}_change'.format(field_name[1:].lower())
self._execute_bindings(binding_name, current_value, self.previous_data[field_name])
def _main_loop(self):
try:
while self.running:
self._main_iteration()
time.sleep(self.interval)
except Exception as exc:
self.exc = exc
def start(self):
"""
Start listening to changes
"""
self.running = True
self.thread = threading.Thread(target=self._main_loop)
self.thread.start()
def stop(self):
"""
Stop listening to changes. This has to be explicitly called before you terminate your program
or the listening thread will never die.
"""
self.running = False
def subscribe(self, field_names):
"""
Subscribe to given fields.
Special fields cannot be subscribed to and will be checked on every iteration. These include:
* loco name
* coordinates
* fuel level
* gradient
* current heading
* is in tunnel
* time
You can of course still receive notifications when those change.
        It is important to understand that when the loco changes, the set of available controllers will likely
        change too. Fields that are no longer present will stop triggering change notifications.
:param field_names: list
:raises ValueError if field is not present on current loco
"""
available_controls = dict(self.raildriver.get_controller_list()).values()
for field in field_names:
if field not in available_controls:
raise ValueError('Cannot subscribe to a missing controller {}'.format(field))
self.subscribed_fields = field_names
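# A minimal usage sketch (not part of the original module). The raildriver.RailDriver()
# constructor and the 'SpeedometerMPH' controller name are illustrative assumptions; bindings
# are attached through __getattr__, e.g. listener.on_speedometermph_change(callback).
#
# rd = raildriver.RailDriver()
# listener = Listener(rd, interval=0.5)
# listener.subscribe(['SpeedometerMPH'])
# listener.on_speedometermph_change(lambda new, old: print(new, old))
# listener.start()
# ...
# listener.stop()  # must be called explicitly, or the listening thread never dies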
|
device.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import logging
import threading
import logging.config
import numpy as np
from datetime import datetime
from collections import defaultdict
from .io import discover_hosts, io_from_host, Ws
from .services import name2mod
from anytree import AnyNode, RenderTree, DoubleStyle
def run_from_unittest():
    return 'unittest' in sys.modules
known_host = {
'ergo': ['/dev/cu.usbserial-DN2AAOVK', '/dev/cu.usbserial-DN2YEFLN'],
'handy': ['/dev/cu.usbserial-DN2X236E'],
'eddy': ['pi-gate.local'],
}
class contList(list):
def __repr__(self):
s = '-------------------------------------------------\n'
s += '{:<20s}{:<20s}{:<5s}\n'.format("Type", "Alias", "ID")
s += '-------------------------------------------------\n'
for elem in self:
s += '{:<20s}{:<20s}{:<5d}\n'.format(elem.type, elem.alias, elem.id)
return s
class nodeList(list):
def __repr__(self):
# Display the topology
s = ''
prefill = ''
prechild = False
for pre, fill, node in RenderTree(self[0], style=DoubleStyle()):
child = []
if (node.parent == None):
branch = " ┃ "
for i,x in enumerate(node.port_table):
child.append(i)
else:
l_port_id = '?'
for i,x in enumerate(node.parent.port_table):
if (x == node.id):
l_port_id = str(i)
r_port_id = node.port_table.index(min(node.port_table))
for i,x in enumerate(node.port_table):
if ((i != r_port_id) and (x != 65535)):
child.append(i)
branch = str(l_port_id) + ">┃" + str(r_port_id) + " "
prefill = (prefill[:len(fill)]) if len(prefill) > len(fill) else prefill
s +='{:<{fillsize}s}'.format(prefill, fillsize=len(fill))
if (prechild == True):
position = -4
s = s[:position] + '║' + s[position+1:]
s += " ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n"
tmpstr = "%s╭node %s" % (branch, node.id)
s += pre + '{:^10s}'.format(tmpstr)
if (node.certified == True):
s += '{:^41s}'.format("Certified") + "┃\n"
else:
s += '{:^41s}'.format("/!\\ Not certified") + "┃\n"
s += fill + " ┃ │ " + '{:<20s}{:<20s}{:<5s}'.format("Type", "Alias", "ID")+ "┃\n"
for y,elem in enumerate(node.services):
if (y == (len(node.services)-1)):
s += fill + " ┃ ╰> " + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id)+ "┃\n"
else:
s += fill + " ┃ ├> " + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id) + "┃\n"
if (not child):
s += fill + " >┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n"
prechild = False
else:
s += fill + "╔>┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\n"
prechild = True
prefill = fill
return s
class Device(object):
_heartbeat_timeout = 5 # in sec.
_max_alias_length = 15
_base_log_conf = os.path.join(os.path.dirname(__file__),
'logging_conf.json')
@classmethod
def discover(cls):
hosts = discover_hosts()
possibilities = {
k: [h for h in v if h in hosts]
for k, v in known_host.items()
}
return possibilities
def __init__(self, host,
IO=None,
log_conf=_base_log_conf,
test_mode=False,
background_task=True,
*args, **kwargs):
if IO is not None:
self._io = IO(host=host, *args, **kwargs)
else:
self._io = io_from_host(host=host,
*args, **kwargs)
if os.path.exists(log_conf):
with open(log_conf) as f:
config = json.load(f)
logging.config.dictConfig(config)
self.logger = logging.getLogger(__name__)
self.logger.info('Connected to "{}".'.format(host))
self._send_lock = threading.Lock()
self._cmd_lock = threading.Lock()
# We force a first poll to setup our model.
self._setup()
self.logger.info('Device setup.')
self._last_update = time.time()
self._running = True
self._pause = False
if(background_task == True):
# Setup both poll/push synchronization loops.
self._poll_bg = threading.Thread(target=self._poll_and_up)
self._poll_bg.daemon = True
self._poll_bg.start()
self._baudrate = 1000000
def close(self):
self._running = False
self._poll_bg.join(timeout=2.0)
if self._poll_bg.is_alive():
# _poll_bg didn't terminate within the timeout
print("Warning: device closed on timeout, background thread is still running.")
self._io.close()
@property
def baudrate(self):
return self._baudrate
@baudrate.setter
def baudrate(self, baudrate):
self._send({'baudrate': baudrate})
self._baudrate = baudrate
time.sleep(0.01)
def benchmark(self, target_id, data, repetition):
data = np.array(data, dtype=np.uint8)
self._bench_settings = {'benchmark': {'target': target_id, 'repetitions': repetition, 'data': [len(data)]}}
self._bench_Data = data.tobytes()
self._write( json.dumps(self._bench_settings).encode() + '\n'.encode() + self._bench_Data)
state = self._poll_once()
startTime = time.time()
retry = 0
while ('benchmark' not in state):
state = self._poll_once()
if (time.time()-startTime > 30):
self._write( json.dumps(self._bench_settings).encode() + '\n'.encode() + self._bench_Data)
retry = retry+1
if (retry == 3):
return (0, 100)
startTime = time.time()
#self._pause = False
return (state['benchmark']['data_rate'], state['benchmark']['fail_rate'])
def pause(self):
self._pause = True
time.sleep(1)
def play(self):
self._pause = False
def _setup(self):
self.logger.info('Sending detection signal.')
self._send({'detection': {}})
self.logger.info('Waiting for routing table...')
startTime = time.time()
state = self._poll_once()
while ('routing_table' not in state):
if ('route_table' in state):
self.logger.info("Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\n Please consider updating Luos on your boards")
return
state = self._poll_once()
if (time.time()-startTime > 1):
self._send({'detection': {}})
startTime = time.time()
# Create nodes
self._services = []
self._nodes = []
for i, node in enumerate(state['routing_table']):
if ('node_id' not in node):
self.logger.info("Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\n Please consider updating Luos on your boards")
parent_elem = None
# find a parent and create a link
if (min(node["port_table"]) < node["services"][0]["id"]):
parent_id = min(node["port_table"])
for elem in self._nodes:
if (elem.id == parent_id):
parent_elem = elem
                    break
# create the node
self._nodes.append(AnyNode(id=node["node_id"], certified=node["certified"], parent=parent_elem, port_table=node["port_table"]))
filtered_services = contList([mod for mod in node["services"]
if 'type' in mod and mod['type'] in name2mod.keys()])
# Create a list of services in the node
self._nodes[i].services = [
name2mod[mod['type']](id=mod['id'],
alias=mod['alias'],
device=self)
for mod in filtered_services
if 'type' in mod and 'id' in mod and 'alias' in mod
]
# Create a list of services of the entire device
self._services = self._services + self._nodes[i].services
for mod in self._nodes[i].services:
setattr(self, mod.alias, mod)
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
self._cmd_data = []
self._binary = []
# We push our current state to make sure that
# both our model and the hardware are synced.
self._push_once()
@property
def services(self):
return contList(self._services)
@property
def nodes(self):
return nodeList(self._nodes)
# Poll state from hardware.
def _poll_once(self):
self._state = self._io.read()
self._state['timestamp'] = time.time()
return self._state
def _poll_and_up(self):
while self._running:
if not self._pause :
state = self._poll_once()
self._update(state)
self._push_once()
else :
time.sleep(0.1)
# Update our model with the new state.
def _update(self, new_state):
if 'dead_service' in new_state :
#we have lost a service put a flag on this service
alias = new_state['dead_service']
if hasattr(self, alias):
getattr(self, alias)._kill()
if 'assert' in new_state :
# A node assert, print assert informations
if (('node_id' in new_state['assert']) and ('file' in new_state['assert']) and ('line' in new_state['assert'])):
s = "************************* ASSERT *************************\n"
s += "* Node " + str(new_state['assert']['node_id']) + " assert in file " + new_state['assert']['file'] + " line " + str(new_state['assert']['line'])
s += "\n**********************************************************"
print (s)
if 'services' not in new_state:
return
for alias, mod in new_state['services'].items():
if hasattr(self, alias):
getattr(self, alias)._update(mod)
self._last_update = time.time()
def update_cmd(self, alias, key, val):
with self._cmd_lock:
self._cmd[alias][key] = val
def update_data(self, alias, key, val, data):
with self._cmd_lock:
self._cmd_data.append({alias: {key: val}})
self._binary.append(data.tobytes())
def _push_once(self):
with self._cmd_lock:
if self._cmd:
self._write( json.dumps({'services': self._cmd}).encode())
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
for cmd, binary in zip(self._cmd_data, self._binary):
time.sleep(0.01)
self._write( json.dumps({'services': cmd}).encode() + '\n'.encode() + binary)
self._cmd_data = []
self._binary = []
def _send(self, msg):
with self._send_lock:
self._io.send(msg)
def _write(self, data):
with self._send_lock:
self._io.write(data)
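# A minimal usage sketch (not part of the original module). The serial path comes from the
# known_host table above; the 'rgb_led' alias is an illustrative assumption.
#
# print(Device.discover())                      # {name: [reachable hosts]} for known setups
# device = Device('/dev/cu.usbserial-DN2AAOVK') # runs detection and builds the service tree
# print(device.services)                        # contList: type / alias / id table
# print(device.nodes)                           # nodeList: rendered topology
# device.rgb_led                                # services are attached as attributes by alias
# device.close()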
|
test_functionality.py
|
import os
import sys
import time
import threading
import unittest
import yappi
import _yappi
import utils
import multiprocessing # added to fix http://bugs.python.org/issue15881 for > Py2.6
import subprocess
class BasicUsage(utils.YappiUnitTestCase):
def test_callback_function_int_return_overflow(self):
        # this test is just here to check if any errors are generated; the error
        # is printed on the C side, so it is not asserted here. There are ways to test
        # this deterministically, but I did not bother.
import ctypes
def _unsigned_overflow_margin():
return 2**(ctypes.sizeof(ctypes.c_void_p) * 8) - 1
def foo():
pass
#with utils.captured_output() as (out, err):
yappi.set_context_id_callback(_unsigned_overflow_margin)
yappi.set_tag_callback(_unsigned_overflow_margin)
yappi.start()
foo()
def test_filter(self):
def a():
pass
def b():
a()
def c():
b()
_TCOUNT = 5
ts = []
yappi.start()
for i in range(_TCOUNT):
t = threading.Thread(target=c)
t.start()
ts.append(t)
for t in ts:
t.join()
yappi.stop()
ctx_ids = []
for tstat in yappi.get_thread_stats():
if tstat.name == '_MainThread':
main_ctx_id = tstat.id
else:
ctx_ids.append(tstat.id)
fstats = yappi.get_func_stats(filter={"ctx_id": 9})
self.assertTrue(fstats.empty())
fstats = yappi.get_func_stats(
filter={
"ctx_id": main_ctx_id,
"name": "c"
}
) # main thread
self.assertTrue(fstats.empty())
for i in ctx_ids:
fstats = yappi.get_func_stats(
filter={
"ctx_id": i,
"name": "a",
"ncall": 1
}
)
self.assertEqual(fstats.pop().ncall, 1)
fstats = yappi.get_func_stats(filter={"ctx_id": i, "name": "b"})
self.assertEqual(fstats.pop().ncall, 1)
fstats = yappi.get_func_stats(filter={"ctx_id": i, "name": "c"})
self.assertEqual(fstats.pop().ncall, 1)
yappi.clear_stats()
yappi.start(builtins=True)
time.sleep(0.1)
yappi.stop()
fstats = yappi.get_func_stats(filter={"module": "time"})
self.assertEqual(len(fstats), 1)
def test_filter_callback(self):
def a():
time.sleep(0.1)
def b():
a()
def c():
pass
def d():
pass
yappi.set_clock_type("wall")
yappi.start(builtins=True)
a()
b()
c()
d()
stats = yappi.get_func_stats(
filter_callback=lambda x: yappi.func_matches(x, [a, b])
)
#stats.print_all()
r1 = '''
tests/test_functionality.py:98 a 2 0.000000 0.200350 0.100175
tests/test_functionality.py:101 b 1 0.000000 0.120000 0.100197
'''
self.assert_traces_almost_equal(r1, stats)
self.assertEqual(len(stats), 2)
stats = yappi.get_func_stats(
filter_callback=lambda x: yappi.
module_matches(x, [sys.modules[__name__]])
)
r1 = '''
tests/test_functionality.py:98 a 2 0.000000 0.230130 0.115065
tests/test_functionality.py:101 b 1 0.000000 0.120000 0.109011
tests/test_functionality.py:104 c 1 0.000000 0.000002 0.000002
tests/test_functionality.py:107 d 1 0.000000 0.000001 0.000001
'''
self.assert_traces_almost_equal(r1, stats)
self.assertEqual(len(stats), 4)
stats = yappi.get_func_stats(
filter_callback=lambda x: yappi.func_matches(x, [time.sleep])
)
r1 = '''
time.sleep 2 0.206804 0.220000 0.103402
'''
self.assert_traces_almost_equal(r1, stats)
self.assertEqual(len(stats), 1)
def test_print_formatting(self):
def a():
pass
def b():
a()
func_cols = {
1: ("name", 48),
0: ("ncall", 5),
2: ("tsub", 8),
}
thread_cols = {
1: ("name", 48),
0: ("ttot", 8),
}
yappi.start()
a()
b()
yappi.stop()
fs = yappi.get_func_stats()
cs = fs[1].children
ts = yappi.get_thread_stats()
#fs.print_all(out=sys.stderr, columns={1:("name", 70), })
#cs.print_all(out=sys.stderr, columns=func_cols)
#ts.print_all(out=sys.stderr, columns=thread_cols)
#cs.print_all(out=sys.stderr, columns={})
self.assertRaises(
yappi.YappiError, fs.print_all, columns={1: ("namee", 9)}
)
self.assertRaises(
yappi.YappiError, cs.print_all, columns={1: ("dd", 0)}
)
self.assertRaises(
yappi.YappiError, ts.print_all, columns={1: ("tidd", 0)}
)
def test_get_clock(self):
yappi.set_clock_type('cpu')
self.assertEqual('cpu', yappi.get_clock_type())
clock_info = yappi.get_clock_info()
self.assertTrue('api' in clock_info)
self.assertTrue('resolution' in clock_info)
yappi.set_clock_type('wall')
self.assertEqual('wall', yappi.get_clock_type())
t0 = yappi.get_clock_time()
time.sleep(0.1)
duration = yappi.get_clock_time() - t0
self.assertTrue(0.05 < duration < 0.2)
def test_profile_decorator(self):
def aggregate(func, stats):
fname = "tests/%s.profile" % (func.__name__)
try:
stats.add(fname)
except IOError:
pass
stats.save(fname)
raise Exception("messing around")
@yappi.profile(return_callback=aggregate)
def a(x, y):
if x + y == 25:
raise Exception("")
return x + y
def b():
pass
try:
os.remove(
"tests/a.profile"
) # remove the one from prev test, if available
except:
pass
# global profile is on to mess things up
yappi.start()
b()
# assert functionality and call function at same time
try:
self.assertEqual(a(1, 2), 3)
except:
pass
try:
self.assertEqual(a(2, 5), 7)
except:
pass
try:
a(4, 21)
except:
pass
stats = yappi.get_func_stats().add("tests/a.profile")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(len(stats), 1) # b() should be cleared out.
@yappi.profile(return_callback=aggregate)
def count_down_rec(n):
if n == 0:
return
count_down_rec(n - 1)
try:
os.remove(
"tests/count_down_rec.profile"
) # remove the one from prev test, if available
except:
pass
try:
count_down_rec(4)
except:
pass
try:
count_down_rec(3)
except:
pass
stats = yappi.YFuncStats("tests/count_down_rec.profile")
fsrec = utils.find_stat_by_name(stats, 'count_down_rec')
self.assertEqual(fsrec.ncall, 9)
self.assertEqual(fsrec.nactualcall, 2)
def test_strip_dirs(self):
def a():
pass
stats = utils.run_and_get_func_stats(a, )
stats.strip_dirs()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.module, os.path.basename(fsa.module))
@unittest.skipIf(os.name == "nt", "do not run on Windows")
def test_run_as_script(self):
import re
p = subprocess.Popen(
['yappi', os.path.join('./tests', 'run_as_script.py')],
stdout=subprocess.PIPE
)
out, err = p.communicate()
self.assertEqual(p.returncode, 0)
func_stats, thread_stats = re.split(
b'name\\s+id\\s+tid\\s+ttot\\s+scnt\\s*\n', out
)
self.assertTrue(b'FancyThread' in thread_stats)
def test_yappi_overhead(self):
LOOP_COUNT = 100000
def a():
pass
def b():
for i in range(LOOP_COUNT):
a()
t0 = time.time()
yappi.start()
b()
yappi.stop()
time_with_yappi = time.time() - t0
t0 = time.time()
b()
time_without_yappi = time.time() - t0
if time_without_yappi == 0:
time_without_yappi = 0.000001
        # in the latest v0.82, I measured this as close to 7.0 on my machine.
        # However, 83% of this overhead comes from tickcount(); the other 17%
        # seems to be evenly distributed across the internal bookkeeping
        # structures/algorithms, which seems acceptable. Note that this test only
        # profiles one function at a time over a short interval. Profiling a high
        # number of functions in a short time is a different beast (and pretty
        # unlikely in most applications). So, as a conclusion: I cannot see any
        # optimization window for Yappi worth implementing, as it would only
        # optimize 17% of the time.
sys.stderr.write("\r\nYappi puts %0.1f times overhead to the profiled application in average.\r\n" % \
(time_with_yappi / time_without_yappi))
def test_clear_stats_while_running(self):
def a():
pass
yappi.start()
a()
yappi.clear_stats()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
self.assertEqual(fsa.ncall, 1)
def test_generator(self):
def _gen(n):
while (n > 0):
yield n
n -= 1
yappi.start()
for x in _gen(5):
pass
self.assertTrue(
yappi.convert2pstats(yappi.get_func_stats()) is not None
)
def test_slice_child_stats_and_strip_dirs(self):
def b():
for i in range(10000000):
pass
def a():
b()
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa.children[0:1] is not None)
prev_afullname = fsa.full_name
prev_bchildfullname = fsa.children[fsb].full_name
stats.strip_dirs()
self.assertTrue(len(prev_afullname) > len(fsa.full_name))
self.assertTrue(
len(prev_bchildfullname) > len(fsa.children[fsb].full_name)
)
def test_children_stat_functions(self):
_timings = {"a_1": 5, "b_1": 3, "c_1": 1}
_yappi._set_test_timings(_timings)
def b():
pass
def c():
pass
def a():
b()
c()
yappi.start()
a()
b() # non-child call
c() # non-child call
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
childs_of_a = fsa.children.get().sort("tavg", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.tavg > item.tavg)
prev_item = item
childs_of_a.sort("name", "desc")
prev_item = None
for item in childs_of_a:
if prev_item:
self.assertTrue(prev_item.name > item.name)
prev_item = item
childs_of_a.clear()
self.assertTrue(childs_of_a.empty())
def test_no_stats_different_clock_type_load(self):
def a():
pass
yappi.start()
a()
yappi.stop()
yappi.get_func_stats().save("tests/ystats1.ys")
yappi.clear_stats()
yappi.set_clock_type("WALL")
yappi.start()
yappi.stop()
stats = yappi.get_func_stats().add("tests/ystats1.ys")
fsa = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa is not None)
def test_subsequent_profile(self):
_timings = {"a_1": 1, "b_1": 1}
_yappi._set_test_timings(_timings)
def a():
pass
def b():
pass
yappi.start()
a()
yappi.stop()
yappi.start()
b()
yappi.stop()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertTrue(fsa is not None)
self.assertTrue(fsb is not None)
self.assertEqual(fsa.ttot, 1)
self.assertEqual(fsb.ttot, 1)
def test_lambda(self):
f = lambda: time.sleep(0.3)
yappi.set_clock_type("wall")
yappi.start()
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, '<lambda>')
self.assertTrue(fsa.ttot > 0.1)
def test_module_stress(self):
self.assertEqual(yappi.is_running(), False)
yappi.start()
yappi.clear_stats()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.stop()
yappi.clear_stats()
yappi.set_clock_type("cpu")
self.assertRaises(yappi.YappiError, yappi.set_clock_type, "dummy")
self.assertEqual(yappi.is_running(), False)
yappi.clear_stats()
yappi.clear_stats()
def test_stat_sorting(self):
_timings = {"a_1": 13, "b_1": 10, "a_2": 6, "b_2": 1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("avgtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tavg <= stat.tavg)
prev_stat = stat
stats = stats.sort("name", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name <= stat.name)
prev_stat = stat
stats = stats.sort("subtime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.tsub <= stat.tsub)
prev_stat = stat
self.assertRaises(
yappi.YappiError, stats.sort, "invalid_func_sorttype_arg"
)
self.assertRaises(
yappi.YappiError, stats.sort, "totaltime",
"invalid_func_sortorder_arg"
)
def test_start_flags(self):
self.assertEqual(_yappi._get_start_flags(), None)
yappi.start()
def a():
pass
a()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
self.assertEqual(len(yappi.get_thread_stats()), 1)
def test_builtin_profiling(self):
def a():
time.sleep(0.4) # is a builtin function
yappi.set_clock_type('wall')
yappi.start(builtins=True)
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'sleep')
self.assertTrue(fsa is not None)
self.assertTrue(fsa.ttot > 0.3)
yappi.stop()
yappi.clear_stats()
def a():
pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
stats = yappi.get_func_stats()
def test_singlethread_profiling(self):
yappi.set_clock_type('wall')
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(profile_threads=False)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa2.ttot > 0.1)
def test_run(self):
def profiled():
pass
yappi.clear_stats()
try:
with yappi.run():
profiled()
stats = yappi.get_func_stats()
finally:
yappi.clear_stats()
self.assertIsNotNone(utils.find_stat_by_name(stats, 'profiled'))
def test_run_recursive(self):
def profiled():
pass
def not_profiled():
pass
yappi.clear_stats()
try:
with yappi.run():
with yappi.run():
profiled()
# Profiling stopped here
not_profiled()
stats = yappi.get_func_stats()
finally:
yappi.clear_stats()
self.assertIsNotNone(utils.find_stat_by_name(stats, 'profiled'))
self.assertIsNone(utils.find_stat_by_name(stats, 'not_profiled'))
class StatSaveScenarios(utils.YappiUnitTestCase):
def test_pstats_conversion(self):
def pstat_id(fs):
return (fs.module, fs.lineno, fs.name)
def a():
d()
def b():
d()
def c():
pass
def d():
pass
_timings = {"a_1": 12, "b_1": 7, "c_1": 5, "d_1": 2}
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a, )
stats.strip_dirs()
stats.save("tests/a1.pstats", type="pstat")
fsa_pid = pstat_id(utils.find_stat_by_name(stats, "a"))
fsd_pid = pstat_id(utils.find_stat_by_name(stats, "d"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(a, )
stats.strip_dirs()
stats.save("tests/a2.pstats", type="pstat")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(b, )
stats.strip_dirs()
stats.save("tests/b1.pstats", type="pstat")
fsb_pid = pstat_id(utils.find_stat_by_name(stats, "b"))
yappi.clear_stats()
_yappi._set_test_timings(_timings)
stats = utils.run_and_get_func_stats(c, )
stats.strip_dirs()
stats.save("tests/c1.pstats", type="pstat")
fsc_pid = pstat_id(utils.find_stat_by_name(stats, "c"))
# merge saved stats and check pstats values are correct
import pstats
p = pstats.Stats(
'tests/a1.pstats', 'tests/a2.pstats', 'tests/b1.pstats',
'tests/c1.pstats'
)
p.strip_dirs()
# ct = ttot, tt = tsub
(cc, nc, tt, ct, callers) = p.stats[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 20)
self.assertEqual(ct, 24)
(cc, nc, tt, ct, callers) = p.stats[fsd_pid]
self.assertEqual(cc, nc, 3)
self.assertEqual(tt, 6)
self.assertEqual(ct, 6)
self.assertEqual(len(callers), 2)
(cc, nc, tt, ct) = callers[fsa_pid]
self.assertEqual(cc, nc, 2)
self.assertEqual(tt, 4)
self.assertEqual(ct, 4)
(cc, nc, tt, ct) = callers[fsb_pid]
self.assertEqual(cc, nc, 1)
self.assertEqual(tt, 2)
self.assertEqual(ct, 2)
def test_merge_stats(self):
_timings = {
"a_1": 15,
"b_1": 14,
"c_1": 12,
"d_1": 10,
"e_1": 9,
"f_1": 7,
"g_1": 6,
"h_1": 5,
"i_1": 1
}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
e()
def e():
f()
def f():
g()
def g():
h()
def h():
i()
def i():
pass
yappi.start()
a()
a()
yappi.stop()
stats = yappi.get_func_stats()
self.assertRaises(
NotImplementedError, stats.save, "", "INVALID_SAVE_TYPE"
)
stats.save("tests/ystats2.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
a()
stats = yappi.get_func_stats().add("tests/ystats2.ys")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
fsd = utils.find_stat_by_name(stats, "d")
fse = utils.find_stat_by_name(stats, "e")
fsf = utils.find_stat_by_name(stats, "f")
fsg = utils.find_stat_by_name(stats, "g")
fsh = utils.find_stat_by_name(stats, "h")
fsi = utils.find_stat_by_name(stats, "i")
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.tsub, 3)
self.assertEqual(fsa.children[fsb].ttot, fsb.ttot)
self.assertEqual(fsa.children[fsb].tsub, fsb.tsub)
self.assertEqual(fsb.children[fsc].ttot, fsc.ttot)
self.assertEqual(fsb.children[fsc].tsub, fsc.tsub)
self.assertEqual(fsc.tsub, 6)
self.assertEqual(fsc.children[fsd].ttot, fsd.ttot)
self.assertEqual(fsc.children[fsd].tsub, fsd.tsub)
self.assertEqual(fsd.children[fse].ttot, fse.ttot)
self.assertEqual(fsd.children[fse].tsub, fse.tsub)
self.assertEqual(fse.children[fsf].ttot, fsf.ttot)
self.assertEqual(fse.children[fsf].tsub, fsf.tsub)
self.assertEqual(fsf.children[fsg].ttot, fsg.ttot)
self.assertEqual(fsf.children[fsg].tsub, fsg.tsub)
self.assertEqual(fsg.ttot, 18)
self.assertEqual(fsg.tsub, 3)
self.assertEqual(fsg.children[fsh].ttot, fsh.ttot)
self.assertEqual(fsg.children[fsh].tsub, fsh.tsub)
self.assertEqual(fsh.ttot, 15)
self.assertEqual(fsh.tsub, 12)
self.assertEqual(fsh.tavg, 5)
self.assertEqual(fsh.children[fsi].ttot, fsi.ttot)
self.assertEqual(fsh.children[fsi].tsub, fsi.tsub)
#stats.debug_print()
def test_merge_multithreaded_stats(self):
import _yappi
timings = {"a_1": 2, "b_1": 1}
_yappi._set_test_timings(timings)
def a():
pass
def b():
pass
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
t = threading.Thread(target=b)
t.start()
t.join()
yappi.get_func_stats().save("tests/ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(timings)
self.assertEqual(len(yappi.get_func_stats()), 0)
self.assertEqual(len(yappi.get_thread_stats()), 1)
t = threading.Thread(target=a)
t.start()
t.join()
self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
self.assertEqual(_yappi._get_start_flags()["profile_multithread"], 1)
yappi.get_func_stats().save("tests/ystats2.ys")
stats = yappi.YFuncStats([
"tests/ystats1.ys",
"tests/ystats2.ys",
])
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsb.ncall, 1)
self.assertEqual(fsa.tsub, fsa.ttot, 4)
self.assertEqual(fsb.tsub, fsb.ttot, 1)
def test_merge_load_different_clock_types(self):
yappi.start(builtins=True)
def a():
b()
def b():
c()
def c():
pass
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().sort("name", "asc").save("tests/ystats1.ys")
yappi.stop()
yappi.clear_stats()
yappi.start(builtins=False)
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("tests/ystats2.ys")
yappi.stop()
self.assertRaises(_yappi.error, yappi.set_clock_type, "wall")
yappi.clear_stats()
yappi.set_clock_type("wall")
yappi.start()
t = threading.Thread(target=a)
t.start()
t.join()
yappi.get_func_stats().save("tests/ystats3.ys")
self.assertRaises(
yappi.YappiError,
yappi.YFuncStats().add("tests/ystats1.ys").add, "tests/ystats3.ys"
)
stats = yappi.YFuncStats(["tests/ystats1.ys",
"tests/ystats2.ys"]).sort("name")
fsa = utils.find_stat_by_name(stats, "a")
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsa.ncall, 2)
self.assertEqual(fsa.ncall, fsb.ncall, fsc.ncall)
def test_merge_aabab_aabbc(self):
_timings = {
"a_1": 15,
"a_2": 14,
"b_1": 12,
"a_3": 10,
"b_2": 9,
"c_1": 4
}
_yappi._set_test_timings(_timings)
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 5:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 2:
self._ncall += 1
a()
elif self._ncall == 6:
self._ncall += 1
b()
elif self._ncall == 7:
c()
else:
return
def c():
pass
self._ncall = 1
stats = utils.run_and_get_func_stats(a, )
stats.save("tests/ystats1.ys")
yappi.clear_stats()
_yappi._set_test_timings(_timings)
#stats.print_all()
self._ncall = 5
stats = utils.run_and_get_func_stats(a, )
stats.save("tests/ystats2.ys")
#stats.print_all()
def a(): # same name but another function(code object)
pass
yappi.start()
a()
stats = yappi.get_func_stats().add(
["tests/ystats1.ys", "tests/ystats2.ys"]
)
#stats.print_all()
self.assertEqual(len(stats), 4)
fsa = None
for stat in stats:
if stat.name == "a" and stat.ttot == 45:
fsa = stat
break
self.assertTrue(fsa is not None)
self.assertEqual(fsa.ncall, 7)
self.assertEqual(fsa.nactualcall, 3)
self.assertEqual(fsa.ttot, 45)
self.assertEqual(fsa.tsub, 10)
fsb = utils.find_stat_by_name(stats, "b")
fsc = utils.find_stat_by_name(stats, "c")
self.assertEqual(fsb.ncall, 6)
self.assertEqual(fsb.nactualcall, 3)
self.assertEqual(fsb.ttot, 36)
self.assertEqual(fsb.tsub, 27)
self.assertEqual(fsb.tavg, 6)
self.assertEqual(fsc.ttot, 8)
self.assertEqual(fsc.tsub, 8)
self.assertEqual(fsc.tavg, 4)
self.assertEqual(fsc.nactualcall, fsc.ncall, 2)
class MultithreadedScenarios(utils.YappiUnitTestCase):
def test_issue_32(self):
        '''
        Starting yappi from a different thread used to give Internal Error(15):
        current_ctx_id() is called while enumerating the threads in start(), and
        since it does not swap to the enumerated ThreadState*, ThreadState_GetDict()
        returns the wrong object and thus sets an invalid id for the _ctx structure.
        When this happens, multiple threads end up with the same tid because the
        internal ts_ptr is the same for different contexts. So, let's see if that happens.
        '''
def foo():
time.sleep(0.2)
def bar():
time.sleep(0.1)
def thread_func():
yappi.set_clock_type("wall")
yappi.start()
bar()
t = threading.Thread(target=thread_func)
t.start()
t.join()
foo()
yappi.stop()
thread_ids = set()
for tstat in yappi.get_thread_stats():
self.assertTrue(tstat.tid not in thread_ids)
thread_ids.add(tstat.tid)
def test_subsequent_profile(self):
WORKER_COUNT = 5
def a():
pass
def b():
pass
def c():
pass
_timings = {
"a_1": 3,
"b_1": 2,
"c_1": 1,
}
yappi.start()
def g():
pass
g()
yappi.stop()
yappi.clear_stats()
_yappi._set_test_timings(_timings)
yappi.start()
_dummy = []
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=b)
t.start()
_dummy.append(t)
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=a)
t.start()
t.join()
for i in range(WORKER_COUNT):
t = threading.Thread(target=c)
t.start()
t.join()
yappi.stop()
yappi.start()
def f():
pass
f()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall, 10)
self.assertEqual(fsb.ncall, 5)
self.assertEqual(fsc.ncall, 5)
self.assertEqual(fsa.ttot, fsa.tsub, 30)
self.assertEqual(fsb.ttot, fsb.tsub, 10)
self.assertEqual(fsc.ttot, fsc.tsub, 5)
        # macOS optimizes by only creating one worker thread
self.assertTrue(len(yappi.get_thread_stats()) >= 2)
def test_basic(self):
yappi.set_clock_type('wall')
def dummy():
pass
def a():
time.sleep(0.2)
class Worker1(threading.Thread):
def a(self):
time.sleep(0.3)
def run(self):
self.a()
yappi.start(builtins=False, profile_threads=True)
c = Worker1()
c.start()
c.join()
a()
stats = yappi.get_func_stats()
fsa1 = utils.find_stat_by_name(stats, 'Worker1.a')
fsa2 = utils.find_stat_by_name(stats, 'a')
self.assertTrue(fsa1 is not None)
self.assertTrue(fsa2 is not None)
self.assertTrue(fsa1.ttot > 0.2)
self.assertTrue(fsa2.ttot > 0.1)
tstats = yappi.get_thread_stats()
self.assertEqual(len(tstats), 2)
tsa = utils.find_stat_by_name(tstats, 'Worker1')
tsm = utils.find_stat_by_name(tstats, '_MainThread')
dummy() # call dummy to force ctx name to be retrieved again.
self.assertTrue(tsa is not None)
# TODO: I put dummy() to fix below, remove the comments after a while.
self.assertTrue( # FIX: I see this fails sometimes?
tsm is not None,
'Could not find "_MainThread". Found: %s' % (', '.join(utils.get_stat_names(tstats))))
def test_ctx_stats(self):
from threading import Thread
DUMMY_WORKER_COUNT = 5
yappi.start()
class DummyThread(Thread):
pass
def dummy():
pass
def dummy_worker():
pass
for i in range(DUMMY_WORKER_COUNT):
t = DummyThread(target=dummy_worker)
t.start()
t.join()
yappi.stop()
stats = yappi.get_thread_stats()
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is not None)
yappi.clear_stats()
time.sleep(1.0)
_timings = {
"a_1": 6,
"b_1": 5,
"c_1": 3,
"d_1": 1,
"a_2": 4,
"b_2": 3,
"c_2": 2,
"d_2": 1
}
_yappi._set_test_timings(_timings)
class Thread1(Thread):
pass
class Thread2(Thread):
pass
def a():
b()
def b():
c()
def c():
d()
def d():
time.sleep(0.6)
yappi.set_clock_type("wall")
yappi.start()
t1 = Thread1(target=a)
t1.start()
t2 = Thread2(target=a)
t2.start()
t1.join()
t2.join()
stats = yappi.get_thread_stats()
        # the first clear_stats clears the context table?
tsa = utils.find_stat_by_name(stats, "DummyThread")
self.assertTrue(tsa is None)
tst1 = utils.find_stat_by_name(stats, "Thread1")
tst2 = utils.find_stat_by_name(stats, "Thread2")
tsmain = utils.find_stat_by_name(stats, "_MainThread")
dummy() # call dummy to force ctx name to be retrieved again.
self.assertTrue(len(stats) == 3)
self.assertTrue(tst1 is not None)
self.assertTrue(tst2 is not None)
# TODO: I put dummy() to fix below, remove the comments after a while.
self.assertTrue( # FIX: I see this fails sometimes
tsmain is not None,
'Could not find "_MainThread". Found: %s' % (', '.join(utils.get_stat_names(stats))))
self.assertTrue(1.0 > tst2.ttot >= 0.5)
self.assertTrue(1.0 > tst1.ttot >= 0.5)
# test sorting of the ctx stats
stats = stats.sort("totaltime", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot >= stat.ttot)
prev_stat = stat
stats = stats.sort("totaltime", "asc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.ttot <= stat.ttot)
prev_stat = stat
stats = stats.sort("schedcount", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.sched_count >= stat.sched_count)
prev_stat = stat
stats = stats.sort("name", "desc")
prev_stat = None
for stat in stats:
if prev_stat:
self.assertTrue(prev_stat.name.lower() >= stat.name.lower())
prev_stat = stat
self.assertRaises(
yappi.YappiError, stats.sort, "invalid_thread_sorttype_arg"
)
self.assertRaises(
yappi.YappiError, stats.sort, "invalid_thread_sortorder_arg"
)
def test_producer_consumer_with_queues(self):
# we currently just stress yappi, no functionality test is done here.
yappi.start()
if utils.is_py3x():
from queue import Queue
else:
from Queue import Queue
from threading import Thread
WORKER_THREAD_COUNT = 50
WORK_ITEM_COUNT = 2000
def worker():
while True:
item = q.get()
# do the work with item
q.task_done()
q = Queue()
for i in range(WORKER_THREAD_COUNT):
t = Thread(target=worker)
t.daemon = True
t.start()
for item in range(WORK_ITEM_COUNT):
q.put(item)
q.join() # block until all tasks are done
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
def test_temporary_lock_waiting(self):
yappi.start()
_lock = threading.Lock()
def worker():
_lock.acquire()
try:
time.sleep(1.0)
finally:
_lock.release()
t1 = threading.Thread(target=worker)
t2 = threading.Thread(target=worker)
t1.start()
t2.start()
t1.join()
t2.join()
#yappi.get_func_stats().sort("callcount").print_all()
yappi.stop()
@unittest.skipIf(os.name != "posix", "requires Posix compliant OS")
def test_signals_with_blocking_calls(self):
import signal, os, time
# just to verify if signal is handled correctly and stats/yappi are not corrupted.
def handler(signum, frame):
raise Exception("Signal handler executed!")
yappi.start()
signal.signal(signal.SIGALRM, handler)
signal.alarm(1)
self.assertRaises(Exception, time.sleep, 2)
stats = yappi.get_func_stats()
fsh = utils.find_stat_by_name(stats, "handler")
self.assertTrue(fsh is not None)
@unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_concurrent_futures(self):
yappi.start()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=5) as executor:
f = executor.submit(pow, 5, 2)
self.assertEqual(f.result(), 25)
time.sleep(1.0)
yappi.stop()
@unittest.skipIf(not sys.version_info >= (3, 2), "requires Python 3.2")
def test_barrier(self):
yappi.start()
b = threading.Barrier(2, timeout=1)
def worker():
try:
b.wait()
except threading.BrokenBarrierError:
pass
except Exception:
raise Exception("BrokenBarrierError not raised")
t1 = threading.Thread(target=worker)
t1.start()
#b.wait()
t1.join()
yappi.stop()
class NonRecursiveFunctions(utils.YappiUnitTestCase):
def test_abcd(self):
_timings = {"a_1": 6, "b_1": 5, "c_1": 3, "d_1": 1}
_yappi._set_test_timings(_timings)
def a():
b()
def b():
c()
def c():
d()
def d():
pass
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfscd = fsc.children[fsd]
self.assertEqual(fsa.ttot, 6)
self.assertEqual(fsa.tsub, 1)
self.assertEqual(fsb.ttot, 5)
self.assertEqual(fsb.tsub, 2)
self.assertEqual(fsc.ttot, 3)
self.assertEqual(fsc.tsub, 2)
self.assertEqual(fsd.ttot, 1)
self.assertEqual(fsd.tsub, 1)
self.assertEqual(cfsab.ttot, 5)
self.assertEqual(cfsab.tsub, 2)
self.assertEqual(cfsbc.ttot, 3)
self.assertEqual(cfsbc.tsub, 2)
self.assertEqual(cfscd.ttot, 1)
self.assertEqual(cfscd.tsub, 1)
def test_stop_in_middle(self):
_timings = {"a_1": 6, "b_1": 4}
_yappi._set_test_timings(_timings)
def a():
b()
yappi.stop()
def b():
time.sleep(0.2)
yappi.start()
a()
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
self.assertEqual(fsa.ncall, 1)
self.assertEqual(fsa.nactualcall, 0)
self.assertEqual(fsa.ttot, 0) # no call_leave called
self.assertEqual(fsa.tsub, 0) # no call_leave called
self.assertEqual(fsb.ttot, 4)
class RecursiveFunctions(utils.YappiUnitTestCase):
def test_fibonacci(self):
def fib(n):
if n > 1:
return fib(n - 1) + fib(n - 2)
else:
return n
stats = utils.run_and_get_func_stats(fib, 22)
fs = utils.find_stat_by_name(stats, 'fib')
self.assertEqual(fs.ncall, 57313)
self.assertEqual(fs.ttot, fs.tsub)
def test_abcadc(self):
_timings = {
"a_1": 20,
"b_1": 19,
"c_1": 17,
"a_2": 13,
"d_1": 12,
"c_2": 10,
"a_3": 5
}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
if n == 1 + 1:
d(n)
else:
b(n)
def b(n):
c(n)
def c(n):
a(n + 1)
def d(n):
c(n)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 1)
self.assertEqual(fsa.ttot, 20)
self.assertEqual(fsa.tsub, 7)
self.assertEqual(fsb.ttot, 19)
self.assertEqual(fsb.tsub, 2)
self.assertEqual(fsc.ttot, 17)
self.assertEqual(fsc.tsub, 9)
self.assertEqual(fsd.ttot, 12)
self.assertEqual(fsd.tsub, 2)
cfsca = fsc.children[fsa]
self.assertEqual(cfsca.nactualcall, 0)
self.assertEqual(cfsca.ncall, 2)
self.assertEqual(cfsca.ttot, 13)
self.assertEqual(cfsca.tsub, 6)
def test_aaaa(self):
_timings = {"d_1": 9, "d_2": 7, "d_3": 3, "d_4": 2}
_yappi._set_test_timings(_timings)
def d(n):
if n == 3:
return
d(n + 1)
stats = utils.run_and_get_func_stats(d, 0)
fsd = utils.find_stat_by_name(stats, 'd')
self.assertEqual(fsd.ncall, 4)
self.assertEqual(fsd.nactualcall, 1)
self.assertEqual(fsd.ttot, 9)
self.assertEqual(fsd.tsub, 9)
cfsdd = fsd.children[fsd]
self.assertEqual(cfsdd.ttot, 7)
self.assertEqual(cfsdd.tsub, 7)
self.assertEqual(cfsdd.ncall, 3)
self.assertEqual(cfsdd.nactualcall, 0)
def test_abcabc(self):
_timings = {
"a_1": 20,
"b_1": 19,
"c_1": 17,
"a_2": 13,
"b_2": 11,
"c_2": 9,
"a_3": 6
}
_yappi._set_test_timings(_timings)
def a(n):
if n == 3:
return
else:
b(n)
def b(n):
c(n)
def c(n):
a(n + 1)
stats = utils.run_and_get_func_stats(a, 1)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
self.assertEqual(fsa.ncall, 3)
self.assertEqual(fsa.nactualcall, 1)
self.assertEqual(fsa.ttot, 20)
self.assertEqual(fsa.tsub, 9)
self.assertEqual(fsb.ttot, 19)
self.assertEqual(fsb.tsub, 4)
self.assertEqual(fsc.ttot, 17)
self.assertEqual(fsc.tsub, 7)
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(cfsab.ttot, 19)
self.assertEqual(cfsab.tsub, 4)
self.assertEqual(cfsbc.ttot, 17)
self.assertEqual(cfsbc.tsub, 7)
self.assertEqual(cfsca.ttot, 13)
self.assertEqual(cfsca.tsub, 8)
def test_abcbca(self):
_timings = {"a_1": 10, "b_1": 9, "c_1": 7, "b_2": 4, "c_2": 2, "a_2": 1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
else:
return
def b():
c()
def c():
if self._ncall == 1:
self._ncall += 1
b()
else:
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc]
cfsca = fsc.children[fsa]
self.assertEqual(fsa.ttot, 10)
self.assertEqual(fsa.tsub, 2)
self.assertEqual(fsb.ttot, 9)
self.assertEqual(fsb.tsub, 4)
self.assertEqual(fsc.ttot, 7)
self.assertEqual(fsc.tsub, 4)
self.assertEqual(cfsab.ttot, 9)
self.assertEqual(cfsab.tsub, 2)
self.assertEqual(cfsbc.ttot, 7)
self.assertEqual(cfsbc.tsub, 4)
self.assertEqual(cfsca.ttot, 1)
self.assertEqual(cfsca.tsub, 1)
self.assertEqual(cfsca.ncall, 1)
self.assertEqual(cfsca.nactualcall, 0)
def test_aabccb(self):
_timings = {
"a_1": 13,
"a_2": 11,
"b_1": 9,
"c_1": 5,
"c_2": 3,
"b_2": 1
}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
else:
b()
def b():
if self._ncall == 3:
return
else:
c()
def c():
if self._ncall == 2:
self._ncall += 1
c()
else:
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
fsc = utils.find_stat_by_name(stats, 'c')
cfsaa = fsa.children[fsa.index]
cfsab = fsa.children[fsb]
cfsbc = fsb.children[fsc.full_name]
cfscc = fsc.children[fsc]
cfscb = fsc.children[fsb]
self.assertEqual(fsb.ttot, 9)
self.assertEqual(fsb.tsub, 5)
self.assertEqual(cfsbc.ttot, 5)
self.assertEqual(cfsbc.tsub, 2)
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 4)
self.assertEqual(cfsab.ttot, 9)
self.assertEqual(cfsab.tsub, 4)
self.assertEqual(cfsaa.ttot, 11)
self.assertEqual(cfsaa.tsub, 2)
self.assertEqual(fsc.ttot, 5)
self.assertEqual(fsc.tsub, 4)
def test_abaa(self):
_timings = {"a_1": 13, "b_1": 10, "a_2": 9, "a_3": 5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
elif self._ncall == 2:
self._ncall += 1
a()
else:
return
def b():
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsba = fsb.children[fsa]
self.assertEqual(fsb.ttot, 10)
self.assertEqual(fsb.tsub, 1)
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 12)
self.assertEqual(cfsaa.ttot, 5)
self.assertEqual(cfsaa.tsub, 5)
self.assertEqual(cfsba.ttot, 9)
self.assertEqual(cfsba.tsub, 4)
def test_aabb(self):
_timings = {"a_1": 13, "a_2": 10, "b_1": 9, "b_2": 5}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
self._ncall += 1
a()
elif self._ncall == 2:
b()
else:
return
def b():
if self._ncall == 2:
self._ncall += 1
b()
else:
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 4)
self.assertEqual(fsb.ttot, 9)
self.assertEqual(fsb.tsub, 9)
self.assertEqual(cfsaa.ttot, 10)
self.assertEqual(cfsaa.tsub, 1)
self.assertEqual(cfsab.ttot, 9)
self.assertEqual(cfsab.tsub, 4)
self.assertEqual(cfsbb.ttot, 5)
self.assertEqual(cfsbb.tsub, 5)
def test_abbb(self):
_timings = {"a_1": 13, "b_1": 10, "b_2": 6, "b_3": 1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 1:
b()
def b():
if self._ncall == 3:
return
self._ncall += 1
b()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsbb = fsb.children[fsb]
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 3)
self.assertEqual(fsb.ttot, 10)
self.assertEqual(fsb.tsub, 10)
self.assertEqual(fsb.ncall, 3)
self.assertEqual(fsb.nactualcall, 1)
self.assertEqual(cfsab.ttot, 10)
self.assertEqual(cfsab.tsub, 4)
self.assertEqual(cfsbb.ttot, 6)
self.assertEqual(cfsbb.tsub, 6)
self.assertEqual(cfsbb.nactualcall, 0)
self.assertEqual(cfsbb.ncall, 2)
def test_aaab(self):
_timings = {"a_1": 13, "a_2": 10, "a_3": 6, "b_1": 1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
if self._ncall == 3:
b()
return
self._ncall += 1
a()
def b():
return
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsaa = fsa.children[fsa]
cfsab = fsa.children[fsb]
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 12)
self.assertEqual(fsb.ttot, 1)
self.assertEqual(fsb.tsub, 1)
self.assertEqual(cfsaa.ttot, 10)
self.assertEqual(cfsaa.tsub, 9)
self.assertEqual(cfsab.ttot, 1)
self.assertEqual(cfsab.tsub, 1)
def test_abab(self):
_timings = {"a_1": 13, "b_1": 10, "a_2": 6, "b_2": 1}
_yappi._set_test_timings(_timings)
self._ncall = 1
def a():
b()
def b():
if self._ncall == 2:
return
self._ncall += 1
a()
stats = utils.run_and_get_func_stats(a)
fsa = utils.find_stat_by_name(stats, 'a')
fsb = utils.find_stat_by_name(stats, 'b')
cfsab = fsa.children[fsb]
cfsba = fsb.children[fsa]
self.assertEqual(fsa.ttot, 13)
self.assertEqual(fsa.tsub, 8)
self.assertEqual(fsb.ttot, 10)
self.assertEqual(fsb.tsub, 5)
self.assertEqual(cfsab.ttot, 10)
self.assertEqual(cfsab.tsub, 5)
self.assertEqual(cfsab.ncall, 2)
self.assertEqual(cfsab.nactualcall, 1)
self.assertEqual(cfsba.ttot, 6)
self.assertEqual(cfsba.tsub, 5)
if __name__ == '__main__':
# import sys;sys.argv = ['', 'BasicUsage.test_run_as_script']
# import sys;sys.argv = ['', 'MultithreadedScenarios.test_subsequent_profile']
unittest.main()
|
Win10CleanApp.py
|
import tkinter as tk
import tkinter.ttk as ttk
import os
from tkinter import simpledialog
import shutil
import winreg
import subprocess
import threading
import time
import ctypes
import enum
import sys
class Cleaner():
def __init__(self, root):
'''window & app settings'''
self.root = root
root.title("Win 10 Cleaner")
root.resizable(False, False)
self.widgets()
win_width = root.winfo_reqwidth()
        win_height = root.winfo_reqheight()
        pos_right = int(root.winfo_screenwidth() / 3 - win_width / 3)
        pos_down = int(root.winfo_screenheight() / 3 - win_height / 3)
root.geometry("800x450+{}+{}".format(pos_right, pos_down))
root.iconbitmap("icon.ico")
self.freeDiskBefore=''
self.freeDiskAfter=''
self.progress = True
self.psStatus = ''
self.debug=0
def widgets(self):
'''main window widgets'''
self.text = 'Win 10 Cleaner'
self.bt_text = 'Εκκίνηση'
self.font = 'Arial 15 bold'
self.frame = tk.Frame(self.root)
self.frame.pack(expand=1, fill='both')
self.canvas = tk.Canvas(self.frame, bg='gray')
self.canvas.pack(expand=1, fill='both')
        # create the canvas objects
self.image_bg = tk.PhotoImage(file='image.gif')
self.canvas.create_image(0, 0, image=self.image_bg, anchor='nw')
self.pg_bar = ttk.Progressbar(self.canvas, orient = 'horizontal', length = 500, mode = 'determinate')
self.pg_bar.pack()
self.button1 = tk.Button(self.canvas, text=self.bt_text, font='Arial 12', command=self.start, width=15, anchor='s') #self.clean
self.button1.pack()
self.button2 = tk.Button(self.canvas, text='Έξοδος', font='Arial 12', command=self.root.destroy, width=10, anchor='s')
self.button2.pack()
self.buttoni = tk.Button(self.canvas, text='About', font='Arial 8 bold', width=6, command=self.info, anchor='s')
self.buttoni.pack()
        # place the objects on the canvas
self.pos_text = self.canvas.create_text(400, 150, text=self.text, font=self.font, width=400, anchor='n', fill='black')
self.pos_pg_bar = self.canvas.create_window(400, 250, anchor='s', window=self.pg_bar)
self.pos_b1 = self.canvas.create_window(400,300, anchor='s', window=self.button1)
self.pos_b2 = self.canvas.create_window(750, 400, anchor='se', window=self.button2)
self.pos_bi = self.canvas.create_window(797, 3, anchor='ne', window=self.buttoni)
def progressBar(self):
'''creates progress bar'''
if self.freeDiskBefore != '':
self.canvas.delete(self.endMSG)
self.pg_bar['value'] = 0
self.canvas.itemconfigure(self.pos_pg_bar, state='normal') # canvas.itemconfigure(id, state='hidden'/'normal')
self.progress = True
self.freeDiskBefore = ''
self.freeDiskAfter = ''
def refresh(self):
'''refresh threads'''
self.root.update()
self.root.after(100, self.refresh)
def start(self):
'''starts cleaning threads'''
self.button1['state'] = 'disabled'
self.progressBar()
self.disk_size()
self.start1()
self.start2()
self.registryPaths()
self.start3()
while self.th.is_alive():
if self.debug == 1: print("waiting...")
time.sleep(1)
self.refresh()
if self.th.is_alive() != True:
self.disk_size()
self.endMessage()
self.button1['state'] = 'normal'
def endMessage(self):
'''end message'''
spacefree = abs(int(self.freeDiskBefore)-int(self.freeDiskAfter))
endMSG = f'Ελευθερώθηκαν {spacefree} ΜΒ από τον δίσκο'
        self.canvas.itemconfigure(self.pos_pg_bar, state='hidden')  # canvas.itemconfigure(id, state='hidden'/'normal')
        self.endMSG = self.canvas.create_text(400, 200, text=endMSG, font=self.font, width=400, anchor='n', fill='black')
    def start1(self):
        '''delete files from the user temporary folder'''
        self.refresh()
        threading.Thread(target=self.temp, daemon=True).start()
        threading.Thread(target=self.progress_bar, args=(os.environ['temp'],), daemon=True).start()
    def start2(self):
        '''delete files from the Windows temporary folder'''
        self.refresh()
        threading.Thread(target=self.win_temp, daemon=True).start()
        threading.Thread(target=self.progress_bar, args=(os.environ['Windir'] + r'\Temp',), daemon=True).start()
def start3(self):
        '''starts the process for the Windows clean manager'''
self.refresh()
if self.debug == 1: print('here runs on start 3 before process')
self.th = threading.Thread(target=self.clean_manager, daemon=True)
self.th.start()
if self.debug == 1: print('here runs on start 3 after process')
def info(self):
'''info about app'''
simpledialog.messagebox.showinfo('About', 'Win 10 Cleaner v2.1\nCredits: \nΚωνσταντίνος Καρακασίδης')
def clean_manager(self):
        '''execute the Windows clean manager with the configured attributes'''
subprocess.run(["cleanmgr", "/sagerun:1929"])
if self.debug == 1: print('complete')
def registryPaths(self):
'''adds registry key for use from clean_manager function'''
regpath = [r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Active Setup Temp Folders',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\BranchCache',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Downloaded Program Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Internet Cache Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Memory Dump Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Old ChkDsk Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Previous Installations',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Recycle Bin',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Service Pack Cleanup',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Setup Log Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\System error memory dump files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\System error minidump files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Temporary Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Temporary Setup Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Thumbnail Cache',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Update Cleanup',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Upgrade Discarded Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\User file versions',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Defender',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Error Reporting Archive Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Error Reporting Queue Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Error Reporting System Archive Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Error Reporting System Queue Files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows ESD installation files',
r'Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\Windows Upgrade Log Files']
for key in regpath:
winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, key)
key2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key, 0,winreg.KEY_WRITE)
            winreg.SetValueEx(key2, 'StateFlags1929', 0, winreg.REG_DWORD, 2)
winreg.CloseKey(key2)
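        # Note (assumption to verify on the target system): cleanmgr reads the StateFlags<NNNN>
        # value whose number matches the /sagerun:<NNNN> argument used in clean_manager(),
        # so the two numbers must stay in sync.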
def temp(self):
'''deletes user temp files'''
        self.deleting = shutil.rmtree(os.environ['temp'], ignore_errors=True)
self.progress = False
def win_temp(self):
'''deletes windows temp files'''
self.progress = True
        self.deleting = shutil.rmtree(os.environ['Windir'] + r'\Temp', ignore_errors=True)
self.progress = False
    def progress_bar(self, folder):
        '''updates the progress bar while a temp folder is being emptied'''
        before_delete = len(os.listdir(folder))
        if self.debug == 1: print(before_delete)
        while self.progress == True:
            try:
                current_value = len(os.listdir(folder))
            except OSError:
                current_value = 0
            if before_delete < 20:
                self.pg_bar['value'] = 100
            else:
                self.pg_bar['value'] = (before_delete - current_value) * 100 / before_delete
            time.sleep(0.1)
            if self.debug == 1: print(self.pg_bar['value'], 'progress bar value')
def disk_size(self):
total, used, free = shutil.disk_usage("/")
if self.freeDiskBefore == '':
self.freeDiskBefore = f'{free // (2 ** 20)}'
else:
self.freeDiskAfter = f'{free // (2 ** 20)}'
if self.debug == 1:
print(f"Total: {total / (2 ** 20):.2f} MB")
print(f"Used: {used / (2 ** 20):.2f} MB")
print(f"Free: {free / (2 ** 20):.2f} MB")
print(self.freeDiskBefore, 'Before MB')
print(self.freeDiskAfter, 'After MB')
# ------------------------------MS code for admin privileges start------------------------------------------------------
class SW(enum.IntEnum):
HIDE = 0
MAXIMIZE = 3
MINIMIZE = 6
RESTORE = 9
SHOW = 5
SHOWDEFAULT = 10
SHOWMAXIMIZED = 3
SHOWMINIMIZED = 2
SHOWMINNOACTIVE = 7
SHOWNA = 8
SHOWNOACTIVATE = 4
SHOWNORMAL = 1
class ERROR(enum.IntEnum):
ZERO = 0
FILE_NOT_FOUND = 2
PATH_NOT_FOUND = 3
BAD_FORMAT = 11
ACCESS_DENIED = 5
ASSOC_INCOMPLETE = 27
DDE_BUSY = 30
DDE_FAIL = 29
DDE_TIMEOUT = 28
DLL_NOT_FOUND = 32
NO_ASSOC = 31
OOM = 8
SHARE = 26
def bootstrap():
if ctypes.windll.shell32.IsUserAnAdmin():
        root = tk.Tk()  # the application starts here
        Cleaner(root)
        root.mainloop()  # Tk's event loop must run in the main thread
else:
hinstance = ctypes.windll.shell32.ShellExecuteW(
None, 'runas', sys.executable, sys.argv[0], None, SW.SHOWNORMAL
)
if hinstance <= 32:
raise RuntimeError(ERROR(hinstance))
# ------------------------------MS code for admin privileges end--------------------------------------------------------
if __name__ == '__main__':
app = bootstrap()
|
forch_proxy.py
|
"""Module for proxy server to aggregate and serve data"""
import functools
import threading
import requests
from forch.http_server import HttpServer
from forch.utils import get_logger
LOGGER = get_logger('proxy')
DEFAULT_PROXY_PORT = 8080
LOCALHOST = '0.0.0.0'
class ForchProxy():
"""Class that implements the module that creates a proxy server"""
def __init__(self, proxy_config, content_type=None):
self._proxy_config = proxy_config
self._proxy_port = self._proxy_config.proxy_port or DEFAULT_PROXY_PORT
self._pages = {}
self._proxy_server = None
self._content_type = content_type
def start(self):
"""Start proxy server"""
self._register_pages()
self._proxy_server = HttpServer(self._proxy_port, content_type=self._content_type)
try:
self._proxy_server.map_request('', self._get_path_data)
except Exception as e:
self._proxy_server.map_request('', functools.partial(self._show_error, e))
finally:
            threading.Thread(target=self._proxy_server.start_server, daemon=True).start()
LOGGER.info('Started proxy server on port %s', self._proxy_port)
def stop(self):
"""Kill server"""
LOGGER.info('Stopping proxy server')
self._proxy_server.stop_server()
def _get_url(self, server, port):
return 'http://' + str(server) + ':' + str(port)
def _register_page(self, path, server, port):
self._pages[path] = self._get_url(server, port)
def _register_pages(self):
for name, target in self._proxy_config.targets.items():
self._register_page(name, LOCALHOST, target.port)
def _get_proxy_help(self):
"""Display proxy help"""
help_str = 'Following paths are supported:\n\n\t'
for target in self._proxy_config.targets:
help_str += '/' + target + '\n\t'
return help_str
def _get_path_data(self, path, params):
path = '/'.join(path.split('/')[1:])
url = self._pages.get(path)
if not url:
return self._get_proxy_help()
try:
data = requests.get(url)
except requests.exceptions.RequestException as e:
return "Error retrieving data from url %s: %s" % (url, str(e))
return data.content.decode('utf-8')
def _show_error(self, error, path, params):
"""Display errors"""
return f"Error creating proxy server: {str(error)}"
|
test_threading.py
|
'''
Testing for race conditions and deadlocks.
'''
from destructure import *
from destructure import Match
import random
from threading import Thread
import time
import unittest
class FuzzyBinding(Binding):
'Delays getting an unbound attribute to reveal race conditions'
def __setattr__(self, name, value):
if not isinstance(getattr(self, name), Unbound):
fmt = 'name {name!r} has already been bound to {value!r}'
raise BindError(fmt.format(name=name, value=value))
time.sleep(random.random()/10)
super(Binding, self).__setattr__(name, value)
class NoLockMatch(Match):
def acquire_binding_lock(self):
pass
class BindingDeadlockTestCase(unittest.TestCase):
'''
Test for two matches and two bindings causing deadlock.
Multiple Binding objects in a single schema is not supported.
To avoid deadlock in such a case, we'd need to traverse the schema
looking for all Binding objects and acquire all their locks before
doing any work. What a pain...
'''
def test_no_lock(self):
errors = self.deadlock(NoLockMatch().match)
self.assertEqual(2, sum(errors))
@unittest.skip("Schemas may not have multiple Binding objects")
def test_with_lock(self):
blocked = self.deadlock(match)
self.assertFalse(blocked)
def deadlock(self, match):
errors = []
a = FuzzyBinding()
b = FuzzyBinding()
schema1 = [a.x, b.x]
schema2 = [b.y, a.y]
data = [1, 2]
def worker(schema, data):
try:
match(schema, data)
except SchemaError:
errors.append(True)
else:
errors.append(False)
t1 = Thread(target=worker, args=(schema1, data))
t2 = Thread(target=worker, args=(schema2, data))
def monitor():
time.sleep(15)
raise RuntimeError('deadlock suspected, please stop test')
m = Thread(target=monitor)
m.daemon = True
m.start()
threads = [t1, t2]
for t in threads:
t.start()
for t in threads:
t.join()
return errors
class BindingRaceTestCase(unittest.TestCase):
'test for two matches racing to bind a name'
def test_no_lock(self):
errors = self.race(NoLockMatch().match)
self.assertEqual(0, sum(errors))
def test_with_lock(self):
errors = self.race(match)
self.assertEqual(1, sum(errors))
def race(self, match):
errors = []
o = FuzzyBinding()
schema = o.x
data = 1
def worker():
try:
match(schema, data)
except BindError:
errors.append(True)
else:
errors.append(False)
threads = [Thread(target=worker) for i in range(2)]
for t in threads:
t.start()
for t in threads:
t.join()
return errors
if __name__ == '__main__':
unittest.main()
|
preparer.py
|
# -*- coding: utf-8 -*-
# Copyright CERN since 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import threading
from time import time
from typing import TYPE_CHECKING
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.exception import RucioException
from rucio.common.logging import setup_logging
from rucio.core.request import preparer_update_requests, reduce_requests, sort_requests_minimum_distance, \
get_transfertool_filter, get_supported_transfertools, rse_lookup_filter, list_transfer_requests_and_source_replicas
from rucio.db.sqla.constants import RequestState
from rucio.daemons.common import run_daemon
if TYPE_CHECKING:
from typing import Optional
from sqlalchemy.orm import Session
from rucio.daemons.common import HeartbeatHandler
graceful_stop = threading.Event()
def stop():
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1, sleep_time=10, bulk=100):
"""
Running the preparer daemon either once or by default in a loop until stop is called.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
def preparer_kwargs():
# not sure if this is needed for threading.Thread, but it always returns a fresh dictionary
return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}
threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
for thr in threads:
thr.start()
all_running = True
while all_running:
for thr in threads:
thr.join(timeout=3.14)
if not thr.is_alive() or graceful_stop.is_set():
all_running = False
break
if graceful_stop.is_set() or once:
logging.info('conveyor-preparer: gracefully stopping')
else:
logging.warning('conveyor-preparer: stopping out of the ordinary')
graceful_stop.set()
for thr in threads:
thr.join(timeout=3.14)
logging.info('conveyor-preparer: stopped')
def preparer(once, sleep_time, bulk, partition_wait_time=10):
# Make an initial heartbeat so that all instanced daemons have the correct worker number on the next try
logger_prefix = executable = 'conveyor-preparer'
run_daemon(
once=once,
graceful_stop=graceful_stop,
executable=executable,
logger_prefix=logger_prefix,
partition_wait_time=partition_wait_time,
sleep_time=sleep_time,
run_once_fnc=functools.partial(
run_once,
bulk=bulk
),
activities=None,
)
def run_once(bulk: int = 100, heartbeat_handler: "Optional[HeartbeatHandler]" = None, session: "Optional[Session]" = None, **kwargs) -> bool:
if heartbeat_handler:
worker_number, total_workers, logger = heartbeat_handler.live()
else:
# This is used in tests
worker_number, total_workers, logger = 0, 0, logging.log
start_time = time()
try:
req_sources = list_transfer_requests_and_source_replicas(
total_workers=total_workers,
worker_number=worker_number,
limit=bulk,
request_state=RequestState.PREPARING,
session=session
)
if not req_sources:
count = 0
updated_msg = 'had nothing to do'
else:
transfertool_filter = get_transfertool_filter(lambda rse_id: get_supported_transfertools(rse_id=rse_id, session=session))
requests = reduce_requests(req_sources, [rse_lookup_filter, sort_requests_minimum_distance, transfertool_filter], logger=logger)
count = preparer_update_requests(requests, session=session)
updated_msg = f'updated {count}/{bulk} requests'
except RucioException:
logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)
count = 0
updated_msg = 'errored'
logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time() - start_time))
queue_empty = False
if count < bulk:
queue_empty = True
return queue_empty
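# Usage sketch (hedged): run() is the daemon entry point; for example
#   run(once=True, threads=1, sleep_time=10, bulk=100)
# performs a single preparation pass through run_daemon()/run_once() above.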
|
InstanceHelper.py
|
import sys
import logging
import os
import SocketServer
import SimpleSocket
import socket
import threading
RemoteArgumentCallBack=None
_GII_INSTANCE_PORT = 61957
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request.recv(1024)
cur_thread = threading.currentThread()
logging.info('remote data:' + data )
data = data.split()
#Note to the self.server.app
output = []
        if RemoteArgumentCallBack is not None:
RemoteArgumentCallBack( data, output )
result = '\n'.join( output )
self.request.send( result )
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
stopped = False
allow_reuse_address = True
def serve_forever(self):
while not self.stopped:
self.handle_request()
def force_stop(self):
self.server_close()
self.stopped = True
def send_to_server(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
sock.send(message)
response = sock.recv(1024)
sock.close()
return response
def start_server(host, port):
server = ThreadedTCPServer((host, port), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.setDaemon(True)
server_thread.start()
return server
server = None
def checkSingleInstance(PORT=0):
    global server
    if PORT == 0:
        PORT = _GII_INSTANCE_PORT
# HOST = socket.gethostname()
HOST = '127.0.0.1'
argv=sys.argv[:]
argv.insert(0, os.path.realpath('.'))
# if len(argv) > 1:
# argv[1]=os.path.realpath(argv[1])
try:
send_to_server(HOST, PORT, ' '.join(argv)) #send a message to server
return False
except socket.error: #port not occupied, it's safe to start a new instance
server = start_server(HOST, PORT)
return True
def setRemoteArgumentCallback(callback):
global RemoteArgumentCallBack
RemoteArgumentCallBack=callback
def sendRemoteMsg( msg ):
PORT = _GII_INSTANCE_PORT
HOST = '127.0.0.1'
response = SimpleSocket.send_to_server( HOST, PORT, msg )
return response
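# Typical single-instance pattern (sketch; onRemoteArgument is a hypothetical handler):
#   if not checkSingleInstance():
#       sys.exit(0)  # another instance is running and has received our argv
#   setRemoteArgumentCallback(onRemoteArgument)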
|
stacks.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Provides facilities to dump all stacks of all threads in the process.
"""
import os
import sys
import time
import threading
import traceback
from debugpy.common import log
def dump():
"""Dump stacks of all threads in this process, except for the current thread.
"""
tid = threading.current_thread().ident
pid = os.getpid()
log.info("Dumping stacks for process {0}...", pid)
for t_ident, frame in sys._current_frames().items():
if t_ident == tid:
continue
for t in threading.enumerate():
if t.ident == tid:
t_name = t.name
t_daemon = t.daemon
break
else:
t_name = t_daemon = "<unknown>"
stack = "".join(traceback.format_stack(frame))
log.info(
"Stack of thread {0} (tid={1}, pid={2}, daemon={3}):\n\n{4}",
t_name,
t_ident,
pid,
t_daemon,
stack,
)
log.info("Finished dumping stacks for process {0}.", pid)
def dump_after(secs):
"""Invokes dump() on a background thread after waiting for the specified time.
"""
def dumper():
time.sleep(secs)
try:
dump()
except:
log.swallow_exception()
thread = threading.Thread(target=dumper)
thread.daemon = True
thread.start()
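# Example (sketch): schedule a stack dump 30 seconds from now without blocking the caller.
#   dump_after(30)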
|
main_price_fetcher.py
|
# coding=utf-8
"""
PAT - the name of the current project.
main_subscriber.py - the name of the new file which you specify in the New File
dialog box during the file creation.
Hossein - the login name of the current user.
7 / 27 / 18 - the current system date.
9: 14 AM - the current system time.
PyCharm - the name of the IDE in which the file will be created.
"""
from observer import PriceFetcher
from publisher import delete_all_topics
from config import PROJECT_ID, TICKERS
import threading
import requests
import os
def get_tickers(filename=None, extension='txt'):
"""
find the tickers from the '.txt' files in the current directory
:param list filename: name of the files
:param extension: the file type
:return: list of tickers
"""
filename = filename or [file for file in os.listdir('./source/') if file.split('.')[-1] == extension]
tickers = []
for file in filename:
f = open('./source/' + file)
lines = f.read().split('\n')[1:]
tick = [line.split(',')[0] for line in lines]
tick = [t for t in filter(None, tick)]
tickers.extend(tick)
f.close()
tickers = list(set(tickers))
tickers = [t for t in filter(lambda x: '.' not in x and '^' not in x, tickers)]
tickers = [t.strip() for t in tickers]
return [t for t in filter(None, tickers)]
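# Example (sketch): tickers = get_tickers(extension='txt') reads every ./source/*.txt file
# and returns the de-duplicated, cleaned ticker symbols.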
def send_simple_message():
"""
the email sender
:return: post request
"""
return requests.post(
"https://api.mailgun.net/v3/YOUR_DOMAIN_NAME/messages",
auth=("api", ''),
data={"from": "Excited User <[email protected]>",
"to": ["[email protected]"],
"subject": "Hello",
"text": "Testing some Mailgun awesomness!"})
def main():
"""
The main function to start the price fetcher
"""
# send_simple_message()
nthreads = 10
tickers = TICKERS # get_tickers(filename=['SPX.csv'])
delete_all_topics(PROJECT_ID)
ntickers = int(len(tickers)/nthreads)
for ithread in range(nthreads):
ilist = tickers[ithread * ntickers: (ithread + 1) * ntickers]
scanner = PriceFetcher(tickers=ilist, topic='live_publisher_')
thread = threading.Thread(target=scanner.engine, name='t' + str(ithread))
thread.start()
if __name__ == '__main__':
main()
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1040
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes result in a weird
# snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
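# Example (sketch; "mydaemon" is a hypothetical module): register a python process that
# should only run while the car is started.
#   register_managed_process("mydaemon", "selfdrive.mydaemon", car_started=True)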
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "1"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("LongControlEnabled", "0"),
("RadarDisableEnabled", "0"),
("MdpsHarnessEnabled", "0"),
("SccEnabled", "1"),
("EnableOPwithCC", "1"),
("SccHarnessPresent", "0"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['proclogd']
del managed_processes['logcatd']
del managed_processes['uploader']
del managed_processes['updated']
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
Server.py
|
import socket
import threading
import beepy
DIS_MSG = "disconnect"
FORMAT = "utf-8"
HEADER = 64
PORT = 9090
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
print(SERVER)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[Server]: New Connection - {addr} connected")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
            if msg == DIS_MSG:
                connected = False
            elif msg == "ping":
                print(f"[Server]: Address = {addr} Message = {msg}")
                beepy.beep(sound=4)
            else:
                print(f"[Server]: Address = {addr} Message = {msg}")
                beepy.beep(sound=1)
conn.close()
def start():
server.listen()
print(f"[Server]: Listening... {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[Server]: Active connections - {threading.activeCount() - 1} ")
print("[Server]: Starting... ")
start()
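# Client-side sketch (illustrative only; 'client' is assumed to be a socket connected to ADDR).
# Messages are length-prefixed: a fixed-width HEADER field carrying the payload length, then
# the payload itself, matching what handle_client() expects above.
#   msg = "ping".encode(FORMAT)
#   header = str(len(msg)).encode(FORMAT)
#   header += b' ' * (HEADER - len(header))
#   client.send(header)
#   client.send(msg)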
|
bot_uptime.py
|
from flask import Flask
from threading import Thread
from waitress import serve
app = Flask('')
@app.route('/')
def home():
return "Bot is online!"
def run():
# production server using waitress
serve(app, host="0.0.0.0", port=8082)
def start_server():
t = Thread(target=run)
t.start()
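# Usage sketch: import this module from the bot's entry point and call start_server()
# before the bot logs in, so the keep-alive endpoint runs in a background thread.
#   start_server()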
|
testclient_launcher.py
|
#!/usr/bin/env python
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that launches testclient.Loopback in another process."""
import multiprocessing
from google.apputils import app
from fleetspeak.src.client.daemonservice.testclient import testclient
def main(argv=None):
del argv
p = multiprocessing.Process(target=testclient.Loopback)
p.start()
p.join()
if __name__ == "__main__":
app.run()
|
test_0.py
|
import threading
import thread
from time import sleep
from math import sqrt, ceil, floor
import random
from random import Random
import time
import copy
import ACS__POA
from ACS import CBDescIn
from Acspy.Clients.SimpleClient import PySimpleClient
import sb
import jsonAcs
class MockSched():
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
def __init__(self, site_type):
self.log = my_log(title=__name__)
self.log.info([['y', " - MockSched - "], ['g', site_type]])
self.site_type = site_type
self.tel_ids = tel_ids[site_type]
self.debug = False
self.expire = 86400 # one day
self.cycle_blocks = []
self.acs_blocks = dict()
self.acs_blocks['sched_block'] = dict()
self.acs_blocks['metadata'] = dict()
self.active_sched_block = 0
self.client = PySimpleClient()
self.supervisor = self.client.getComponent("ArraySupervisor")
# print 'got ArraySupervisor ............'
self.n_sched_block = [5, 15]
self.max_n_obs_block = 7 if self.site_type == "N" else 32
self.max_n_obs_block = min(self.max_n_obs_block, len(self.tel_ids))
# self.max_n_obs_block = len(self.tel_ids)
self.loop_sleep = 4
self.az_min_max = [0, 360]
self.zen_min_max_tel = [0, 70]
self.zen_min_max_pnt = [0, 20]
rnd_seed = getTime()
rnd_seed = 10987268332
self.rnd_gen = Random(rnd_seed)
print 'xxxxxxxxxxx'
active = self.supervisor.listSchedulingBlocks()
print '-----', active
self.cancel_sched_blocks(active[0])
print 'sleep...'
sleep(10)
print 'check...'
print '---', len(
self.supervisor.getSbOperationStatus(active[0]).ob_statuses
), '------------', self.supervisor.getSbOperationStatus(active[0]).ob_statuses
print '---', self.supervisor.getSbOperationStatus(active[0]
).ob_statuses[-1].status
activeNow = self.supervisor.listSchedulingBlocks()
print 'active now ....', activeNow
# self.threads = []
# run_event = threading.Event()
# run_event.set()
# t = threading.Thread(target=self.loop, args = (run_event,))
# t.start()
# self.threads.append(t)
# try:
# while 1:
# sleep(.1)
# except KeyboardInterrupt:
# run_event.clear()
# for t in self.threads:
# t.join()
return
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
def cancel_sched_blocks(self, sched_blk_id):
class MyVoid(ACS__POA.CBvoid):
def working(self, completion, desc):
# print "bbbbbbbbbb Callback working",sched_blk_id
return
def done(self, completion, desc):
# print "bbbbbbbbbbbbb Callback done",sched_blk_id
return
desc = CBDescIn(0L, 0L, 0L)
cb = MyVoid()
self.log.info([['r', " ---- MockSched.cancel_sched_blocks() ... "],
['g', sched_blk_id]])
self.supervisor.cancelSchedulingBlock(
sched_blk_id, self.client.activateOffShoot(cb), desc
)
self.log.info([['r', " ++++ MockSched.cancel_sched_blocks() ... "],
['g', sched_blk_id]])
return
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
def cancel_zombie_sched_blocks(self, sched_block_ids=None):
if sched_block_ids is None:
try:
active = self.supervisor.listSchedulingBlocks()
except Exception as e:
self.log.debug([['b', "- Exception - MockSched.listSchedulingBlocks: "],
['r', e]])
active = []
sched_block_ids = [
x for x in active if x not in self.acs_blocks['sched_block']
]
sched_block_ids = active
if len(sched_block_ids) == 0:
return
self.log.info([['r', " - MockSched.cancel_zombie_sched_blocks() ..."],
['y', sched_block_ids]])
for sched_block_id_now in sched_block_ids:
self.cancel_sched_blocks(sched_block_id_now)
# t = threading.Thread(target=self.cancel_sched_blocks,args=(sched_block_id_now,))
# t.start()
# t.join()
# self.threads.append(t)
return
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
def init_block_cycle(self):
self.log.info([['p', " - MockSched.init_block_cycle() ..."]])
script_name = "guiACS_sched_blocks_script0"
self.nCycles = [1, 5]
self.n_sched_block = 40
self.max_n_obs_block = 1
self.obs_block_seconds = 20
# cancel sched_blocks which should have expired
# self.cancel_zombie_sched_blocks()
# init local bookkeeping objects
self.cycle_blocks = []
self.acs_blocks = dict()
self.acs_blocks['sched_block'] = dict()
self.acs_blocks['metadata'] = dict()
nCycles = self.rnd_gen.randint(self.nCycles[0], self.nCycles[1])
for n_cycle_now in range(nCycles):
base_name = str(getTime()) + "_"
tel_ids = copy.deepcopy(self.tel_ids)
n_tels = len(tel_ids)
# n_sched_blocks = self.rnd_gen.randint(1, min(n_tels, self.n_sched_block))
n_sched_blocks = self.n_sched_block
# generate random target/pointing ids
target_pos = dict()
blockTrgs = dict()
blockTrgPnt = dict()
n_trgs = self.rnd_gen.randint(1, n_sched_blocks)
for n_trg_try in range(n_sched_blocks):
n_trg_now = self.rnd_gen.randint(0, n_trgs - 1)
if n_trg_now not in blockTrgs:
blockTrgs[n_trg_now] = [n_trg_try]
else:
blockTrgs[n_trg_now].append(n_trg_try)
blockTrgPnt[n_trg_try] = {
"n_trg": n_trg_now,
"n_pnt": len(blockTrgs[n_trg_now]) - 1
}
cycle_blocks = []
for n_sched_block_now in range(n_sched_blocks):
sched_block_id = "schBlock_" + base_name + str(n_sched_block_now)
n_tel_now = self.rnd_gen.randint(1, max(1, len(tel_ids) - n_sched_blocks))
# sched_tel_ids = random.sample(tel_ids, n_tel_now)
# tel_ids = [x for x in tel_ids if x not in sched_tel_ids]
sub_arr = []
# for sched_tel_id_now in sched_tel_ids:
# tel_type = sb.SST if sched_tel_id_now[0] == 'S' else sb.MST if sched_tel_id_now[0] == 'M' else sb.LST
# sub_arr += [ sb.Telescope(sched_tel_id_now, tel_type) ]
sched_conf = sb.Configuration(
sb.InstrumentConfiguration(
sb.PointingMode(2, sb._divergent(2)), sb.Subarray([], sub_arr)
), "camera", "rta"
)
n_obs_blocks = self.rnd_gen.randint(1, self.max_n_obs_block)
n_trg = blockTrgPnt[n_sched_block_now]["n_trg"]
n_pnt = blockTrgPnt[n_sched_block_now]["n_pnt"]
if not n_trg in target_pos:
target_pos[n_trg] = [
(
self.rnd_gen.random() *
(self.az_min_max[1] - self.az_min_max[0])
) + self.az_min_max[0],
(
self.rnd_gen.random() *
(self.zen_min_max_tel[1] - self.zen_min_max_tel[0])
) + self.zen_min_max_tel[0]
]
target_id = "target_" + str(n_trg)
obs_blocks = []
for n_blockNow in range(n_obs_blocks):
obs_block_id = "obs_block_" + str(getTime())
point_pos = copy.deepcopy(target_pos[n_trg])
point_pos[0] += (self.rnd_gen.random() - 0.5) * 10
point_pos[1] += (self.rnd_gen.random() - 0.5) * 10
if point_pos[0] > self.az_min_max[1]:
point_pos[0] -= 360
elif point_pos[0] < self.az_min_max[0]:
point_pos[0] += 360
obs_coords = sb.Coordinates(
2,
sb.HorizontalCoordinates(
target_pos[n_trg][1], target_pos[n_trg][0]
)
)
# obs_coords = sb.Coordinates(3,sb.GalacticCoordinates(target_pos[n_trg][1],target_pos[n_trg][0]))
obs_mode = sb.ObservingMode(
sb.Slewing(1), sb.ObservingType(2, sb.GridSurvey(1, 1, 1))
)
obs_source = sb.Source(
target_id, sb.placeholder, sb.High, sb.RegionOfInterest(100),
obs_mode, obs_coords
)
obs_conds = sb.ObservingConditions(
sb.DateTime(1), self.obs_block_seconds, 1, sb.Quality(1, 1, 1),
sb.Weather(1, 1, 1, 1)
)
obs_block = sb.ObservationBlock(
obs_block_id, obs_source, obs_conds, script_name, 0
)
obs_blocks += [obs_block]
# temporary way to store meta-data
# should be replaced by global coordinate access function
self.acs_blocks['metadata'][obs_block_id + "_"
+ "point_pos"] = point_pos
sched_block = sb.SchedulingBlock(
sched_block_id, sb.Proposal("proposalId"), sched_conf, obs_blocks
)
cycle_blocks.append(sched_block)
self.acs_blocks['sched_block'][sched_block_id] = sched_block
self.cycle_blocks.append(cycle_blocks)
return
# ------------------------------------------------------------------
# move one from wait to run
# ------------------------------------------------------------------
def submit_block_cycle(self):
self.log.info([['g', " - starting MockSched.submit_block_cycle ..."]])
        # regenerate the block cycles once all prepared cycles have been used
        if self.active_sched_block >= len(self.cycle_blocks):
self.active_sched_block = 0
self.init_block_cycle()
# grab the current sched_block from the queue
blockCycle = self.cycle_blocks[self.active_sched_block]
self.active_sched_block += 1
# submit the scheduling blocks
self.log.info([['g', " - submitting ..."]])
for sched_block in blockCycle:
try:
self.log.info([['y', " --- try putSchedulingBlock ", sched_block.id]])
complt = self.supervisor.putSchedulingBlock(sched_block)
except Exception as e:
self.log.debug([['b', "- Exception - MockSched.putSchedulingBlock: "],
['r', e]])
self.log.info([['y', " ----- try putSchedulingBlock done !"]])
return
# ------------------------------------------------------------------
# move one from wait to run
# ------------------------------------------------------------------
def check_sched_blocks(self):
self.log.debug([['b', " - starting MockSched.check_sched_blocks ..."]])
try:
active = self.supervisor.listSchedulingBlocks()
except Exception as e:
self.log.debug([['b', "- Exception - MockSched.listSchedulingBlocks: "],
['r', e]])
active = []
active = [x for x in active if x in self.acs_blocks['sched_block']]
self.log.debug([['b', " --- got ", len(active), " active blocks"]])
# for block_name in active:
# status = self.supervisor.getSchedulingBlockStatus(block_name)
# opstatus = self.supervisor.getSbOperationStatus(block_name)
# self.log.info([['y'," - active_scheduling_blocks - "],['g',active,'-> '],['y',status,' ']])
# for nob in range(len(opstatus.ob_statuses)):
# phases = opstatus.ob_statuses[nob].phases
# # for p in phases:
# # self.log.info([['y'," -- phases - ",block_name,' ',opstatus.ob_statuses[nob].id,' ',opstatus.ob_statuses[nob].status,' '],['g',p.heartbeat_counter,' ',p.name,' ',p.status,' ',p.progress_message]])
# # break
return len(active)
# ------------------------------------------------------------------
#
# ------------------------------------------------------------------
def loop(self, run_event):
self.log.info([['g', " - starting MockSched.loop ..."]])
self.submit_block_cycle()
while run_event.is_set():
n_sched_blocks = self.check_sched_blocks()
self.log.info([['g', " - will now wait for 5 sec ..."]])
sleep(5)
self.log.info([['g', " - will now try to cancel all ..."]])
self.cancel_zombie_sched_blocks()
if n_sched_blocks == 0:
self.submit_block_cycle()
sleep(self.loop_sleep)
return
# ------------------------------------------------------------------
# ignore everything below .....
# ignore everything below .....
# ignore everything below .....
# ------------------------------------------------------------------
import logging
import numbers
import numpy as np
import time
useCol = 1  # set useCol = 0 to disable colored output
use_log_title = False
add_msg_ele_space = False
no_sub_arr_name = "empty_sub_array"
inst_pos_0 = [0, 90]
# has_acs = True
# ------------------------------------------------------------------
# different databases/configurations for north or south
# ------------------------------------------------------------------
def initTelIds(site_type):
tel_ids = []
    # ------------------------------------------------------------------
    # south
    # ------------------------------------------------------------------
    if site_type == "S":
        tel_ids += ["L_" + str(n_tel) for n_tel in range(4)]
        tel_ids += ["M_" + str(n_tel) for n_tel in range(25)]
        tel_ids += ["S_" + str(n_tel) for n_tel in range(70)]
    # ------------------------------------------------------------------
    # north
    # ------------------------------------------------------------------
    if site_type == "N":
        tel_ids += ["L_" + str(n_tel) for n_tel in range(4)]
        tel_ids += ["M_" + str(n_tel) for n_tel in range(15)]
return tel_ids
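# per-site telescope id lists, keyed by site type ("N" / "S")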
tel_ids = dict()
tel_ids["N"] = initTelIds("N")
tel_ids["S"] = initTelIds("S")
def getTime():
return int(time.time() * 1e6)
# --------------------------------------------------------------------------------------------------
# color output
# --------------------------------------------------------------------------------------------------
def setColDict():
col_blue = "\033[34m"
col_red = "\033[31m"
ColGreen = "\033[32m"
ColDef = "\033[0m"
ColLightBlue = "\033[94m"
col_yellow = "\033[33m"
ColPurple = "\033[35m"
ColCyan = "\033[36m"
ColUnderLine = "\033[4;30m"
ColWhiteOnBlack = "\33[40;37;1m"
ColWhiteOnRed = "\33[41;37;1m"
ColWhiteOnGreen = "\33[42;37;1m"
ColWhiteOnYellow = "\33[43;37;1m"
    def no_color(msg):
        return '' if str(msg) == '' else str(msg)
    def blue(msg):
        return '' if str(msg) == '' else col_blue + str(msg) + ColDef
    def red(msg):
        return '' if str(msg) == '' else col_red + str(msg) + ColDef
    def green(msg):
        return '' if str(msg) == '' else ColGreen + str(msg) + ColDef
    def light_blue(msg):
        return '' if str(msg) == '' else ColLightBlue + str(msg) + ColDef
    def yellow(msg):
        return '' if str(msg) == '' else col_yellow + str(msg) + ColDef
    def purple(msg):
        return '' if str(msg) == '' else ColPurple + str(msg) + ColDef
    def cyan(msg):
        return '' if str(msg) == '' else ColCyan + str(msg) + ColDef
    def white_on_black(msg):
        return '' if str(msg) == '' else ColWhiteOnBlack + str(msg) + ColDef
    def red_on_black(msg):
        return '' if str(msg) == '' else ColWhiteOnBlack + col_red + str(msg) + ColDef
    def blue_on_black(msg):
        return '' if str(msg) == '' else ColWhiteOnBlack + col_blue + str(msg) + ColDef
    def yellow_on_black(msg):
        return '' if str(msg) == '' else ColWhiteOnBlack + col_yellow + str(msg) + ColDef
    def white_on_red(msg):
        return '' if str(msg) == '' else ColWhiteOnRed + str(msg) + ColDef
    def yellow_on_red(msg):
        return '' if str(msg) == '' else ColWhiteOnRed + col_yellow + str(msg) + ColDef
    def white_on_yellow(msg):
        return '' if str(msg) == '' else ColWhiteOnYellow + str(msg) + ColDef
    def white_on_green(msg):
        return '' if str(msg) == '' else ColWhiteOnGreen + str(msg) + ColDef
colD = [dict(), dict()]
colD[0][''] = no_color
colD[1][''] = no_color
colD[0]['r'] = no_color
colD[1]['r'] = red
colD[0]['g'] = no_color
colD[1]['g'] = green
colD[0]['b'] = no_color
colD[1]['b'] = blue
colD[0]['y'] = no_color
colD[1]['y'] = yellow
colD[0]['p'] = no_color
colD[1]['p'] = purple
colD[0]['c'] = no_color
colD[1]['c'] = cyan
colD[0]['lb'] = no_color
colD[1]['lb'] = light_blue
colD[0]['wb'] = no_color
colD[1]['wb'] = white_on_black
colD[0]['rb'] = no_color
colD[1]['rb'] = red_on_black
colD[0]['bb'] = no_color
colD[1]['bb'] = blue_on_black
colD[0]['yb'] = no_color
colD[1]['yb'] = yellow_on_black
colD[0]['wr'] = no_color
colD[1]['wr'] = white_on_red
colD[0]['yr'] = no_color
colD[1]['yr'] = yellow_on_red
colD[0]['wy'] = no_color
colD[1]['wy'] = white_on_yellow
colD[0]['wg'] = no_color
colD[1]['wg'] = white_on_green
return colD
colD = setColDict()
class my_log():
def __init__(self, name='', title='', do_parse_msg=True, *args, **kwargs):
self.do_parse_msg = do_parse_msg
self.name = "root" if name is "" else name
self.title = colD[useCol]['c'](
"" if title is "" else (" [" + title + "]" if use_log_title else "")
)
self.log = logging.getLogger(self.name)
# common lock for all loggers
self.lock = my_lock("my_log")
def parse_msg(self, msg_in):
if not self.do_parse_msg:
return msg_in
# --------------------------------------------------------------------------------------------------
# if the input is a list
# --------------------------------------------------------------------------------------------------
if isinstance(msg_in, list):
msg = ""
for msg_now in msg_in:
# --------------------------------------------------------------------------------------------------
# if there is a list of messages
# --------------------------------------------------------------------------------------------------
if isinstance(msg_now, list):
# list with one element
if len(msg_now) == 1:
if add_msg_ele_space and msg is not "":
msg += " "
msg += str(msg_now[0])
# list with multiple elements
elif len(msg_now) >= 2:
# first element is a color indicator
if msg_now[0] in colD[useCol]:
color_func = colD[useCol][msg_now[0]]
                            # there can be one or more messages after the color indicator
if len(msg_now) == 2:
msg_str = str(msg_now[1])
else:
msg_str = (" ").join([
str(ele_now) for ele_now in msg_now[1:]
])
# there is no color indicator, just a list of messages
else:
color_func = colD[useCol]['']
msg_str = (" ").join([str(ele_now) for ele_now in msg_now])
# compose the colored output from the (joined list of) messages(s)
if add_msg_ele_space and msg is not "":
msg += color_func(" ")
msg += color_func(msg_str)
# --------------------------------------------------------------------------------------------------
# if there is a single message (non-list)
# --------------------------------------------------------------------------------------------------
else:
if add_msg_ele_space and msg is not "":
msg += " "
msg += str(msg_now)
# --------------------------------------------------------------------------------------------------
# if the input is a simple element (non-list)
# --------------------------------------------------------------------------------------------------
else:
msg = str(msg_in)
# finally, send the output, with the optional title added
# --------------------------------------------------------------------------------------------------
return (msg + self.title)
def debug(self, msg_in, *args, **kwargs):
with self.lock:
self.log.debug(self.parse_msg(msg_in), *args, **kwargs)
def info(self, msg_in, *args, **kwargs):
with self.lock:
self.log.info(self.parse_msg(msg_in), *args, **kwargs)
def warning(self, msg_in, *args, **kwargs):
with self.lock:
self.log.warning(self.parse_msg(msg_in), *args, **kwargs)
def warn(self, msg_in, *args, **kwargs):
with self.lock:
            self.log.warning(self.parse_msg(msg_in), *args, **kwargs)
def error(self, msg_in, *args, **kwargs):
with self.lock:
self.log.error(self.parse_msg(msg_in), *args, **kwargs)
def critical(self, msg_in, *args, **kwargs):
with self.lock:
self.log.critical(self.parse_msg(msg_in), *args, **kwargs)
# locker class by name
class my_lock():
locks = {}
def __init__(self, name='', seconds_to_check=None):
self.name = "generic" if name is "" else name
self.seconds_to_check = max(
0.0001,
min(
0.5, (
seconds_to_check
if isinstance(seconds_to_check, numbers.Number) else 0.05
)
)
)
self.n_max_checks = max(5 / self.seconds_to_check, 2)
        if self.name not in my_lock.locks:
my_lock.locks[self.name] = False
def __enter__(self):
n_checked = 0
while my_lock.locks[self.name]:
n_checked += 1
if n_checked > self.n_max_checks:
raise Warning(" - could not get lock for " + self.name + " ...")
sleep(self.seconds_to_check)
my_lock.locks[self.name] = True
def __exit__(self, type, value, traceback):
my_lock.locks[self.name] = False
MockSched('N')
|
language_server_client.py
|
import logging
import itertools
from functools import partial
log = logging.getLogger(__name__)
def _read_into_queue(reader, queue):
def _put_into_queue(msg):
queue.put(msg)
reader.listen(_put_into_queue)
class _LanguageServerClient(object):
def __init__(self, writer, reader):
import threading
try:
from queue import Queue
        except ImportError:
from Queue import Queue
self.writer = writer
self.reader = reader
self._queue = Queue()
t = threading.Thread(target=_read_into_queue, args=(reader, self._queue))
t.start()
self.require_exit_messages = True
self.next_id = partial(next, itertools.count())
def write(self, contents):
self.writer.write(contents)
def next_message(self):
from robotframework_ls_tests import conftest
return self._queue.get(block=True, timeout=conftest.TIMEOUT)
def wait_for_message(self, match):
found = False
while not found:
msg = self.next_message()
for key, value in match.items():
if msg.get(key) == value:
continue
log.info("Message found:\n%s\nwhile waiting for\n%s" % (msg, match))
break
else:
found = True
return msg
def initialize(self, root_path, msg_id=None, process_id=None):
from robotframework_ls.uris import from_fs_path
msg_id = msg_id if msg_id is not None else self.next_id()
self.write(
{
"jsonrpc": "2.0",
"id": msg_id,
"method": "initialize",
"params": {
"processId": process_id,
"rootPath": root_path,
"rootUri": from_fs_path(root_path),
"capabilities": {
"workspace": {
"applyEdit": True,
"didChangeConfiguration": {"dynamicRegistration": True},
"didChangeWatchedFiles": {"dynamicRegistration": True},
"symbol": {"dynamicRegistration": True},
"executeCommand": {"dynamicRegistration": True},
},
"textDocument": {
"synchronization": {
"dynamicRegistration": True,
"willSave": True,
"willSaveWaitUntil": True,
"didSave": True,
},
"completion": {
"dynamicRegistration": True,
"completionItem": {
"snippetSupport": True,
"commitCharactersSupport": True,
},
},
"hover": {"dynamicRegistration": True},
"signatureHelp": {"dynamicRegistration": True},
"definition": {"dynamicRegistration": True},
"references": {"dynamicRegistration": True},
"documentHighlight": {"dynamicRegistration": True},
"documentSymbol": {"dynamicRegistration": True},
"codeAction": {"dynamicRegistration": True},
"codeLens": {"dynamicRegistration": True},
"formatting": {"dynamicRegistration": True},
"rangeFormatting": {"dynamicRegistration": True},
"onTypeFormatting": {"dynamicRegistration": True},
"rename": {"dynamicRegistration": True},
"documentLink": {"dynamicRegistration": True},
},
},
"trace": "off",
},
}
)
msg = self.wait_for_message({"id": msg_id})
assert "capabilities" in msg["result"]
return msg
def shutdown(self):
self.write(
{"jsonrpc": "2.0", "id": self.next_id(), "method": "shutdown",}
)
def exit(self):
self.write(
{"jsonrpc": "2.0", "id": self.next_id(), "method": "exit",}
)
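# Illustrative usage (a sketch; `writer` and `reader` are assumed to be the
# JSON-RPC stream objects created elsewhere in the test suite):
#   client = _LanguageServerClient(writer, reader)
#   client.initialize(root_path="/tmp/workspace")
#   ... exchange further messages via client.write() / client.next_message() ...
#   client.shutdown()
#   client.exit()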
|
test-driver.py
|
#! /somewhere/python3
from contextlib import contextmanager, _GeneratorContextManager
from queue import Queue, Empty
from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List, Iterable
from xml.sax.saxutils import XMLGenerator
from colorama import Style
from pathlib import Path
import queue
import io
import threading
import argparse
import base64
import codecs
import os
import ptpython.repl
import pty
import re
import shlex
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import unicodedata
CHAR_TO_KEY = {
"A": "shift-a",
"N": "shift-n",
"-": "0x0C",
"_": "shift-0x0C",
"B": "shift-b",
"O": "shift-o",
"=": "0x0D",
"+": "shift-0x0D",
"C": "shift-c",
"P": "shift-p",
"[": "0x1A",
"{": "shift-0x1A",
"D": "shift-d",
"Q": "shift-q",
"]": "0x1B",
"}": "shift-0x1B",
"E": "shift-e",
"R": "shift-r",
";": "0x27",
":": "shift-0x27",
"F": "shift-f",
"S": "shift-s",
"'": "0x28",
'"': "shift-0x28",
"G": "shift-g",
"T": "shift-t",
"`": "0x29",
"~": "shift-0x29",
"H": "shift-h",
"U": "shift-u",
"\\": "0x2B",
"|": "shift-0x2B",
"I": "shift-i",
"V": "shift-v",
",": "0x33",
"<": "shift-0x33",
"J": "shift-j",
"W": "shift-w",
".": "0x34",
">": "shift-0x34",
"K": "shift-k",
"X": "shift-x",
"/": "0x35",
"?": "shift-0x35",
"L": "shift-l",
"Y": "shift-y",
" ": "spc",
"M": "shift-m",
"Z": "shift-z",
"\n": "ret",
"!": "shift-0x02",
"@": "shift-0x03",
"#": "shift-0x04",
"$": "shift-0x05",
"%": "shift-0x06",
"^": "shift-0x07",
"&": "shift-0x08",
"*": "shift-0x09",
"(": "shift-0x0A",
")": "shift-0x0B",
}
class Logger:
def __init__(self) -> None:
self.logfile = os.environ.get("LOGFILE", "/dev/null")
self.logfile_handle = codecs.open(self.logfile, "wb")
self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
self.queue: "Queue[Dict[str, str]]" = Queue()
self.xml.startDocument()
self.xml.startElement("logfile", attrs={})
self._print_serial_logs = True
@staticmethod
def _eprint(*args: object, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs)
def close(self) -> None:
self.xml.endElement("logfile")
self.xml.endDocument()
self.logfile_handle.close()
def sanitise(self, message: str) -> str:
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
if "machine" in attributes:
return "{}: {}".format(attributes["machine"], message)
return message
def log_line(self, message: str, attributes: Dict[str, str]) -> None:
self.xml.startElement("line", attributes)
self.xml.characters(message)
self.xml.endElement("line")
def info(self, *args, **kwargs) -> None: # type: ignore
self.log(*args, **kwargs)
def warning(self, *args, **kwargs) -> None: # type: ignore
self.log(*args, **kwargs)
def error(self, *args, **kwargs) -> None: # type: ignore
self.log(*args, **kwargs)
sys.exit(1)
def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
self._eprint(self.maybe_prefix(message, attributes))
self.drain_log_queue()
self.log_line(message, attributes)
def log_serial(self, message: str, machine: str) -> None:
self.enqueue({"msg": message, "machine": machine, "type": "serial"})
if self._print_serial_logs:
self._eprint(
Style.DIM + "{} # {}".format(machine, message) + Style.RESET_ALL
)
def enqueue(self, item: Dict[str, str]) -> None:
self.queue.put(item)
def drain_log_queue(self) -> None:
try:
while True:
item = self.queue.get_nowait()
msg = self.sanitise(item["msg"])
del item["msg"]
self.log_line(msg, item)
except Empty:
pass
@contextmanager
def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
self._eprint(self.maybe_prefix(message, attributes))
self.xml.startElement("nest", attrs={})
self.xml.startElement("head", attributes)
self.xml.characters(message)
self.xml.endElement("head")
tic = time.time()
self.drain_log_queue()
yield
self.drain_log_queue()
toc = time.time()
self.log("({:.2f} seconds)".format(toc - tic))
self.xml.endElement("nest")
rootlog = Logger()
def make_command(args: list) -> str:
return " ".join(map(shlex.quote, (map(str, args))))
def retry(fn: Callable, timeout: int = 900) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(timeout):
if fn(False):
return
time.sleep(1)
if not fn(True):
raise Exception(f"action timed out after {timeout} seconds")
def _perform_ocr_on_screenshot(
screenshot_path: str, model_ids: Iterable[int]
) -> List[str]:
if shutil.which("tesseract") is None:
raise Exception("OCR requested but enableOCR is false")
magick_args = (
"-filter Catrom -density 72 -resample 300 "
+ "-contrast -normalize -despeckle -type grayscale "
+ "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ "-blur 1x65535"
)
tess_args = f"-c debug_file=/dev/null --psm 11"
cmd = f"convert {magick_args} {screenshot_path} tiff:{screenshot_path}.tiff"
ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0:
raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")
model_results = []
for model_id in model_ids:
cmd = f"tesseract {screenshot_path}.tiff - {tess_args} --oem {model_id}"
ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0:
raise Exception(f"OCR failed with exit code {ret.returncode}")
model_results.append(ret.stdout.decode("utf-8"))
return model_results
class StartCommand:
"""The Base Start Command knows how to append the necesary
runtime qemu options as determined by a particular test driver
run. Any such start command is expected to happily receive and
append additional qemu args.
"""
_cmd: str
def cmd(
self,
monitor_socket_path: Path,
shell_socket_path: Path,
allow_reboot: bool = False, # TODO: unused, legacy?
) -> str:
display_opts = ""
display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
if not display_available:
display_opts += " -nographic"
# qemu options
qemu_opts = ""
qemu_opts += (
""
if allow_reboot
else " -no-reboot"
" -device virtio-serial"
" -device virtconsole,chardev=shell"
" -device virtio-rng-pci"
" -serial stdio"
)
        # TODO: qemu script already captures this env variable, legacy?
qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
return (
f"{self._cmd}"
f" -monitor unix:{monitor_socket_path}"
f" -chardev socket,id=shell,path={shell_socket_path}"
f"{qemu_opts}"
f"{display_opts}"
)
@staticmethod
def build_environment(
state_dir: Path,
shared_dir: Path,
) -> dict:
# We make a copy to not update the current environment
env = dict(os.environ)
env.update(
{
"TMPDIR": str(state_dir),
"SHARED_DIR": str(shared_dir),
"USE_TMPDIR": "1",
}
)
return env
def run(
self,
state_dir: Path,
shared_dir: Path,
monitor_socket_path: Path,
shell_socket_path: Path,
) -> subprocess.Popen:
return subprocess.Popen(
self.cmd(monitor_socket_path, shell_socket_path),
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd=state_dir,
env=self.build_environment(state_dir, shared_dir),
)
class NixStartScript(StartCommand):
"""A start script from nixos/modules/virtualiation/qemu-vm.nix
that also satisfies the requirement of the BaseStartCommand.
These Nix commands have the particular charactersitic that the
machine name can be extracted out of them via a regex match.
(Admittedly a _very_ implicit contract, evtl. TODO fix)
"""
def __init__(self, script: str):
self._cmd = script
@property
def machine_name(self) -> str:
match = re.search("run-(.+)-vm$", self._cmd)
name = "machine"
if match:
name = match.group(1)
return name
class LegacyStartCommand(StartCommand):
"""Used in some places to create an ad-hoc machine instead of
using nix test instrumentation + module system for that purpose.
Legacy.
"""
def __init__(
self,
netBackendArgs: Optional[str] = None,
netFrontendArgs: Optional[str] = None,
hda: Optional[Tuple[Path, str]] = None,
cdrom: Optional[str] = None,
usb: Optional[str] = None,
bios: Optional[str] = None,
qemuFlags: Optional[str] = None,
):
self._cmd = "qemu-kvm -m 384"
# networking
net_backend = "-netdev user,id=net0"
net_frontend = "-device virtio-net-pci,netdev=net0"
if netBackendArgs is not None:
net_backend += "," + netBackendArgs
if netFrontendArgs is not None:
net_frontend += "," + netFrontendArgs
self._cmd += f" {net_backend} {net_frontend}"
# hda
hda_cmd = ""
if hda is not None:
hda_path = hda[0].resolve()
hda_interface = hda[1]
if hda_interface == "scsi":
hda_cmd += (
f" -drive id=hda,file={hda_path},werror=report,if=none"
" -device scsi-hd,drive=hda"
)
else:
hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
self._cmd += hda_cmd
# cdrom
if cdrom is not None:
self._cmd += f" -cdrom {cdrom}"
# usb
usb_cmd = ""
if usb is not None:
# https://github.com/qemu/qemu/blob/master/docs/usb2.txt
usb_cmd += (
" -device usb-ehci"
f" -drive id=usbdisk,file={usb},if=none,readonly"
" -device usb-storage,drive=usbdisk "
)
self._cmd += usb_cmd
# bios
if bios is not None:
self._cmd += f" -bios {bios}"
# qemu flags
if qemuFlags is not None:
self._cmd += f" {qemuFlags}"
class Machine:
"""A handle to the machine with this name, that also knows how to manage
the machine lifecycle with the help of a start script / command."""
name: str
tmp_dir: Path
shared_dir: Path
state_dir: Path
monitor_path: Path
shell_path: Path
start_command: StartCommand
keep_vm_state: bool
allow_reboot: bool
process: Optional[subprocess.Popen]
pid: Optional[int]
monitor: Optional[socket.socket]
shell: Optional[socket.socket]
serial_thread: Optional[threading.Thread]
booted: bool
connected: bool
# Store last serial console lines for use
# of wait_for_console_text
last_lines: Queue = Queue()
def __repr__(self) -> str:
return f"<Machine '{self.name}'>"
def __init__(
self,
tmp_dir: Path,
start_command: StartCommand,
name: str = "machine",
keep_vm_state: bool = False,
allow_reboot: bool = False,
) -> None:
self.tmp_dir = tmp_dir
self.keep_vm_state = keep_vm_state
self.allow_reboot = allow_reboot
self.name = name
self.start_command = start_command
# set up directories
self.shared_dir = self.tmp_dir / "shared-xchg"
self.shared_dir.mkdir(mode=0o700, exist_ok=True)
self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
self.monitor_path = self.state_dir / "monitor"
self.shell_path = self.state_dir / "shell"
if (not self.keep_vm_state) and self.state_dir.exists():
self.cleanup_statedir()
self.state_dir.mkdir(mode=0o700, exist_ok=True)
self.process = None
self.pid = None
self.monitor = None
self.shell = None
self.serial_thread = None
self.booted = False
self.connected = False
@staticmethod
def create_startcommand(args: Dict[str, str]) -> StartCommand:
rootlog.warning(
"Using legacy create_startcommand(),"
"please use proper nix test vm instrumentation, instead"
"to generate the appropriate nixos test vm qemu startup script"
)
hda = None
if args.get("hda"):
hda_arg: str = args.get("hda", "")
hda_arg_path: Path = Path(hda_arg)
hda = (hda_arg_path, args.get("hdaInterface", ""))
return LegacyStartCommand(
netBackendArgs=args.get("netBackendArgs"),
netFrontendArgs=args.get("netFrontendArgs"),
hda=hda,
cdrom=args.get("cdrom"),
usb=args.get("usb"),
bios=args.get("bios"),
qemuFlags=args.get("qemuFlags"),
)
def is_up(self) -> bool:
return self.booted and self.connected
def log(self, msg: str) -> None:
rootlog.log(msg, {"machine": self.name})
def log_serial(self, msg: str) -> None:
rootlog.log_serial(msg, self.name)
def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
my_attrs = {"machine": self.name}
my_attrs.update(attrs)
return rootlog.nested(msg, my_attrs)
def wait_for_monitor_prompt(self) -> str:
assert self.monitor is not None
answer = ""
while True:
undecoded_answer = self.monitor.recv(1024)
if not undecoded_answer:
break
answer += undecoded_answer.decode()
if answer.endswith("(qemu) "):
break
return answer
def send_monitor_command(self, command: str) -> str:
message = ("{}\n".format(command)).encode()
self.log("sending monitor command: {}".format(command))
assert self.monitor is not None
self.monitor.send(message)
return self.wait_for_monitor_prompt()
def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
"""Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as
after timing out.
"""
def check_active(_: Any) -> bool:
info = self.get_unit_info(unit, user)
state = info["ActiveState"]
if state == "failed":
raise Exception('unit "{}" reached state "{}"'.format(unit, state))
if state == "inactive":
status, jobs = self.systemctl("list-jobs --full 2>&1", user)
if "No jobs" in jobs:
info = self.get_unit_info(unit, user)
if info["ActiveState"] == state:
raise Exception(
(
'unit "{}" is inactive and there ' "are no pending jobs"
).format(unit)
)
return state == "active"
retry(check_active)
def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
if status != 0:
raise Exception(
'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
unit, "" if user is None else 'under user "{}"'.format(user), status
)
)
line_pattern = re.compile(r"^([^=]+)=(.*)$")
def tuple_from_line(line: str) -> Tuple[str, str]:
match = line_pattern.match(line)
assert match is not None
return match[1], match[2]
return dict(
tuple_from_line(line)
for line in lines.split("\n")
if line_pattern.match(line)
)
def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
if user is not None:
q = q.replace("'", "\\'")
return self.execute(
(
"su -l {} --shell /bin/sh -c "
"$'XDG_RUNTIME_DIR=/run/user/`id -u` "
"systemctl --user {}'"
).format(user, q)
)
return self.execute("systemctl {}".format(q))
def require_unit_state(self, unit: str, require_state: str = "active") -> None:
with self.nested(
"checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
):
info = self.get_unit_info(unit)
state = info["ActiveState"]
if state != require_state:
raise Exception(
"Expected unit ‘{}’ to to be in state ".format(unit)
+ "'{}' but it is in state ‘{}’".format(require_state, state)
)
def _next_newline_closed_block_from_shell(self) -> str:
assert self.shell
output_buffer = []
while True:
# This receives up to 4096 bytes from the socket
chunk = self.shell.recv(4096)
if not chunk:
# Probably a broken pipe, return the output we have
break
decoded = chunk.decode()
output_buffer += [decoded]
if decoded[-1] == "\n":
break
return "".join(output_buffer)
def execute(self, command: str, check_return: bool = True) -> Tuple[int, str]:
self.connect()
out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
assert self.shell
self.shell.send(out_command.encode())
# Get the output
output = base64.b64decode(self._next_newline_closed_block_from_shell())
if not check_return:
return (-1, output.decode())
# Get the return code
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
rc = int(self._next_newline_closed_block_from_shell().strip())
return (rc, output.decode())
def shell_interact(self) -> None:
"""Allows you to interact with the guest shell
Should only be used during test development, not in the production test."""
self.connect()
self.log("Terminal is ready (there is no prompt):")
assert self.shell
subprocess.run(
["socat", "READLINE", f"FD:{self.shell.fileno()}"],
pass_fds=[self.shell.fileno()],
)
def succeed(self, *commands: str) -> str:
"""Execute each command and check that it succeeds."""
output = ""
for command in commands:
with self.nested("must succeed: {}".format(command)):
(status, out) = self.execute(command)
if status != 0:
self.log("output: {}".format(out))
raise Exception(
"command `{}` failed (exit code {})".format(command, status)
)
output += out
return output
def fail(self, *commands: str) -> str:
"""Execute each command and check that it fails."""
output = ""
for command in commands:
with self.nested("must fail: {}".format(command)):
(status, out) = self.execute(command)
if status == 0:
raise Exception(
"command `{}` unexpectedly succeeded".format(command)
)
output += out
return output
def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
"""Wait until a command returns success and return its output.
Throws an exception on timeout.
"""
output = ""
def check_success(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
return status == 0
with self.nested("waiting for success: {}".format(command)):
retry(check_success, timeout)
return output
def wait_until_fails(self, command: str) -> str:
"""Wait until a command returns failure.
Throws an exception on timeout.
"""
output = ""
def check_failure(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
return status != 0
with self.nested("waiting for failure: {}".format(command)):
retry(check_failure)
return output
def wait_for_shutdown(self) -> None:
if not self.booted:
return
with self.nested("waiting for the VM to power off"):
sys.stdout.flush()
assert self.process
self.process.wait()
self.pid = None
self.booted = False
self.connected = False
def get_tty_text(self, tty: str) -> str:
status, output = self.execute(
"fold -w$(stty -F /dev/tty{0} size | "
"awk '{{print $2}}') /dev/vcs{0}".format(tty)
)
return output
def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
"""Wait until the visible output on the chosen TTY matches regular
expression. Throws an exception on timeout.
"""
matcher = re.compile(regexp)
def tty_matches(last: bool) -> bool:
text = self.get_tty_text(tty)
if last:
self.log(
f"Last chance to match /{regexp}/ on TTY{tty}, "
f"which currently contains: {text}"
)
return len(matcher.findall(text)) > 0
with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
retry(tty_matches)
def send_chars(self, chars: List[str]) -> None:
with self.nested("sending keys ‘{}‘".format(chars)):
for char in chars:
self.send_key(char)
def wait_for_file(self, filename: str) -> None:
"""Waits until the file exists in machine's file system."""
def check_file(_: Any) -> bool:
status, _ = self.execute("test -e {}".format(filename))
return status == 0
with self.nested("waiting for file ‘{}‘".format(filename)):
retry(check_file)
def wait_for_open_port(self, port: int) -> None:
def port_is_open(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status == 0
with self.nested("waiting for TCP port {}".format(port)):
retry(port_is_open)
def wait_for_closed_port(self, port: int) -> None:
def port_is_closed(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status != 0
retry(port_is_closed)
def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("start {}".format(jobname), user)
def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("stop {}".format(jobname), user)
def wait_for_job(self, jobname: str) -> None:
self.wait_for_unit(jobname)
def connect(self) -> None:
if self.connected:
return
with self.nested("waiting for the VM to finish booting"):
self.start()
assert self.shell
tic = time.time()
self.shell.recv(1024)
# TODO: Timeout
toc = time.time()
self.log("connected to guest root shell")
self.log("(connecting took {:.2f} seconds)".format(toc - tic))
self.connected = True
def screenshot(self, filename: str) -> None:
out_dir = os.environ.get("out", os.getcwd())
word_pattern = re.compile(r"^\w+$")
if word_pattern.match(filename):
filename = os.path.join(out_dir, "{}.png".format(filename))
tmp = "{}.ppm".format(filename)
with self.nested(
"making screenshot {}".format(filename),
{"image": os.path.basename(filename)},
):
self.send_monitor_command("screendump {}".format(tmp))
ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
os.unlink(tmp)
if ret.returncode != 0:
raise Exception("Cannot convert screenshot")
def copy_from_host_via_shell(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest by piping it over the
shell into the destination file. Works without host-guest shared folder.
Prefer copy_from_host for whenever possible.
"""
with open(source, "rb") as fh:
content_b64 = base64.b64encode(fh.read()).decode()
self.succeed(
f"mkdir -p $(dirname {target})",
f"echo -n {content_b64} | base64 -d > {target}",
)
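    # For reference, copy_from_host_via_shell runs roughly the following in the
    # guest (the file name here is hypothetical):
    #   mkdir -p $(dirname /etc/motd)
    #   echo -n <base64 of the source file> | base64 -d > /etc/motd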
def copy_from_host(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest via the `shared_dir` shared
among all the VMs (using a temporary directory).
"""
host_src = Path(source)
vm_target = Path(target)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = Path(shared_td)
host_intermediate = shared_temp / host_src.name
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / host_src.name
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
if host_src.is_dir():
shutil.copytree(host_src, host_intermediate)
else:
shutil.copy(host_src, host_intermediate)
self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
def copy_from_vm(self, source: str, target_dir: str = "") -> None:
"""Copy a file from the VM (specified by an in-VM source path) to a path
relative to `$out`. The file is copied via the `shared_dir` shared among
all the VMs (using a temporary directory).
"""
# Compute the source, target, and intermediate shared file names
out_dir = Path(os.environ.get("out", os.getcwd()))
vm_src = Path(source)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = Path(shared_td)
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / vm_src.name
intermediate = shared_temp / vm_src.name
# Copy the file to the shared directory inside VM
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
abs_target = out_dir / target_dir / vm_src.name
abs_target.parent.mkdir(exist_ok=True, parents=True)
# Copy the file from the shared directory outside VM
if intermediate.is_dir():
shutil.copytree(intermediate, abs_target)
else:
shutil.copy(intermediate, abs_target)
def dump_tty_contents(self, tty: str) -> None:
"""Debugging: Dump the contents of the TTY<n>"""
self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
screenshot_path = os.path.join(tmpdir, "ppm")
self.send_monitor_command(f"screendump {screenshot_path}")
return _perform_ocr_on_screenshot(screenshot_path, model_ids)
def get_screen_text_variants(self) -> List[str]:
return self._get_screen_text_variants([0, 1, 2])
def get_screen_text(self) -> str:
return self._get_screen_text_variants([2])[0]
def wait_for_text(self, regex: str) -> None:
def screen_matches(last: bool) -> bool:
variants = self.get_screen_text_variants()
for text in variants:
if re.search(regex, text) is not None:
return True
if last:
self.log("Last OCR attempt failed. Text was: {}".format(variants))
return False
with self.nested("waiting for {} to appear on screen".format(regex)):
retry(screen_matches)
def wait_for_console_text(self, regex: str) -> None:
self.log("waiting for {} to appear on console".format(regex))
# Buffer the console output, this is needed
# to match multiline regexes.
console = io.StringIO()
while True:
try:
console.write(self.last_lines.get())
except queue.Empty:
self.sleep(1)
continue
console.seek(0)
matches = re.search(regex, console.read())
if matches is not None:
return
def send_key(self, key: str) -> None:
key = CHAR_TO_KEY.get(key, key)
self.send_monitor_command("sendkey {}".format(key))
time.sleep(0.01)
def start(self) -> None:
if self.booted:
return
self.log("starting vm")
def clear(path: Path) -> Path:
if path.exists():
path.unlink()
return path
def create_socket(path: Path) -> socket.socket:
s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
s.bind(str(path))
s.listen(1)
return s
monitor_socket = create_socket(clear(self.monitor_path))
shell_socket = create_socket(clear(self.shell_path))
self.process = self.start_command.run(
self.state_dir,
self.shared_dir,
self.monitor_path,
self.shell_path,
)
self.monitor, _ = monitor_socket.accept()
self.shell, _ = shell_socket.accept()
# Store last serial console lines for use
# of wait_for_console_text
self.last_lines: Queue = Queue()
def process_serial_output() -> None:
assert self.process
assert self.process.stdout
for _line in self.process.stdout:
# Ignore undecodable bytes that may occur in boot menus
line = _line.decode(errors="ignore").replace("\r", "").rstrip()
self.last_lines.put(line)
self.log_serial(line)
self.serial_thread = threading.Thread(target=process_serial_output)
self.serial_thread.start()
self.wait_for_monitor_prompt()
self.pid = self.process.pid
self.booted = True
self.log("QEMU running (pid {})".format(self.pid))
    def cleanup_statedir(self) -> None:
        rootlog.log(f"deleting VM state directory {self.state_dir}")
        rootlog.log("if you want to keep the VM state, pass --keep-vm-state")
        shutil.rmtree(self.state_dir)
def shutdown(self) -> None:
if not self.booted:
return
assert self.shell
self.shell.send("poweroff\n".encode())
self.wait_for_shutdown()
def crash(self) -> None:
if not self.booted:
return
self.log("forced crash")
self.send_monitor_command("quit")
self.wait_for_shutdown()
def wait_for_x(self) -> None:
"""Wait until it is possible to connect to the X server. Note that
testing the existence of /tmp/.X11-unix/X0 is insufficient.
"""
def check_x(_: Any) -> bool:
cmd = (
"journalctl -b SYSLOG_IDENTIFIER=systemd | "
+ 'grep "Reached target Current graphical"'
)
status, _ = self.execute(cmd)
if status != 0:
return False
status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
return status == 0
with self.nested("waiting for the X11 server"):
retry(check_x)
def get_window_names(self) -> List[str]:
return self.succeed(
r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
).splitlines()
def wait_for_window(self, regexp: str) -> None:
pattern = re.compile(regexp)
def window_is_visible(last_try: bool) -> bool:
names = self.get_window_names()
if last_try:
self.log(
"Last chance to match {} on the window list,".format(regexp)
+ " which currently contains: "
+ ", ".join(names)
)
return any(pattern.search(name) for name in names)
with self.nested("Waiting for a window to appear"):
retry(window_is_visible)
def sleep(self, secs: int) -> None:
# We want to sleep in *guest* time, not *host* time.
self.succeed(f"sleep {secs}")
def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
"""Forward a TCP port on the host to a TCP port on the guest.
Useful during interactive testing.
"""
self.send_monitor_command(
"hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
)
def block(self) -> None:
"""Make the machine unreachable by shutting down eth1 (the multicast
interface used to talk to the other VMs). We keep eth0 up so that
the test driver can continue to talk to the machine.
"""
self.send_monitor_command("set_link virtio-net-pci.1 off")
def unblock(self) -> None:
"""Make the machine reachable."""
self.send_monitor_command("set_link virtio-net-pci.1 on")
def release(self) -> None:
if self.pid is None:
return
rootlog.info(f"kill machine (pid {self.pid})")
assert self.process
assert self.shell
assert self.monitor
assert self.serial_thread
self.process.terminate()
self.shell.close()
self.monitor.close()
self.serial_thread.join()
class VLan:
"""This class handles a VLAN that the run-vm scripts identify via its
number handles. The network's lifetime equals the object's lifetime.
"""
nr: int
socket_dir: Path
process: subprocess.Popen
pid: int
fd: io.TextIOBase
def __repr__(self) -> str:
return f"<Vlan Nr. {self.nr}>"
def __init__(self, nr: int, tmp_dir: Path):
self.nr = nr
self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"
# TODO: don't side-effect environment here
os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)
rootlog.info("start vlan")
pty_master, pty_slave = pty.openpty()
self.process = subprocess.Popen(
["vde_switch", "-s", self.socket_dir, "--dirmode", "0700"],
stdin=pty_slave,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
self.pid = self.process.pid
self.fd = os.fdopen(pty_master, "w")
self.fd.write("version\n")
        # TODO: the Perl version checks whether this can be read from and,
        # if not, dies. We could hang here forever; fix it.
assert self.process.stdout is not None
self.process.stdout.readline()
if not (self.socket_dir / "ctl").exists():
rootlog.error("cannot start vde_switch")
rootlog.info(f"running vlan (pid {self.pid})")
def __del__(self) -> None:
rootlog.info(f"kill vlan (pid {self.pid})")
self.fd.close()
self.process.terminate()
class Driver:
"""A handle to the driver that sets up the environment
and runs the tests"""
tests: str
vlans: List[VLan]
machines: List[Machine]
def __init__(
self,
start_scripts: List[str],
vlans: List[int],
tests: str,
keep_vm_state: bool = False,
):
self.tests = tests
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir.mkdir(mode=0o700, exist_ok=True)
with rootlog.nested("start all VLans"):
self.vlans = [VLan(nr, tmp_dir) for nr in vlans]
def cmd(scripts: List[str]) -> Iterator[NixStartScript]:
for s in scripts:
yield NixStartScript(s)
self.machines = [
Machine(
start_command=cmd,
keep_vm_state=keep_vm_state,
name=cmd.machine_name,
tmp_dir=tmp_dir,
)
for cmd in cmd(start_scripts)
]
def __enter__(self) -> "Driver":
return self
def __exit__(self, *_: Any) -> None:
with rootlog.nested("cleanup"):
for machine in self.machines:
machine.release()
def subtest(self, name: str) -> Iterator[None]:
"""Group logs under a given test name"""
with rootlog.nested(name):
try:
yield
return True
except Exception as e:
rootlog.error(f'Test "{name}" failed with error: "{e}"')
raise e
def test_symbols(self) -> Dict[str, Any]:
@contextmanager
def subtest(name: str) -> Iterator[None]:
return self.subtest(name)
general_symbols = dict(
start_all=self.start_all,
test_script=self.test_script,
machines=self.machines,
vlans=self.vlans,
driver=self,
log=rootlog,
os=os,
create_machine=self.create_machine,
subtest=subtest,
run_tests=self.run_tests,
join_all=self.join_all,
retry=retry,
serial_stdout_off=self.serial_stdout_off,
serial_stdout_on=self.serial_stdout_on,
Machine=Machine, # for typing
)
machine_symbols = {m.name: m for m in self.machines}
# If there's exactly one machine, make it available under the name
# "machine", even if it's not called that.
if len(self.machines) == 1:
(machine_symbols["machine"],) = self.machines
vlan_symbols = {
f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
}
print(
"additionally exposed symbols:\n "
+ ", ".join(map(lambda m: m.name, self.machines))
+ ",\n "
+ ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans))
+ ",\n "
+ ", ".join(list(general_symbols.keys()))
)
return {**general_symbols, **machine_symbols, **vlan_symbols}
def test_script(self) -> None:
"""Run the test script"""
with rootlog.nested("run the VM test script"):
symbols = self.test_symbols() # call eagerly
exec(self.tests, symbols, None)
def run_tests(self) -> None:
"""Run the test script (for non-interactive test runs)"""
self.test_script()
# TODO: Collect coverage data
for machine in self.machines:
if machine.is_up():
machine.execute("sync")
def start_all(self) -> None:
"""Start all machines"""
with rootlog.nested("start all VMs"):
for machine in self.machines:
machine.start()
def join_all(self) -> None:
"""Wait for all machines to shut down"""
with rootlog.nested("wait for all VMs to finish"):
for machine in self.machines:
machine.wait_for_shutdown()
def create_machine(self, args: Dict[str, Any]) -> Machine:
rootlog.warning(
"Using legacy create_machine(), please instantiate the"
"Machine class directly, instead"
)
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir.mkdir(mode=0o700, exist_ok=True)
if args.get("startCommand"):
start_command: str = args.get("startCommand", "")
cmd = NixStartScript(start_command)
name = args.get("name", cmd.machine_name)
else:
cmd = Machine.create_startcommand(args) # type: ignore
name = args.get("name", "machine")
return Machine(
tmp_dir=tmp_dir,
start_command=cmd,
name=name,
keep_vm_state=args.get("keep_vm_state", False),
allow_reboot=args.get("allow_reboot", False),
)
def serial_stdout_on(self) -> None:
rootlog._print_serial_logs = True
def serial_stdout_off(self) -> None:
rootlog._print_serial_logs = False
class EnvDefault(argparse.Action):
"""An argpars Action that takes values from the specified
environment variable as the flags default value.
"""
def __init__(self, envvar, required=False, default=None, nargs=None, **kwargs): # type: ignore
if not default and envvar:
if envvar in os.environ:
if nargs is not None and (nargs.isdigit() or nargs in ["*", "+"]):
default = os.environ[envvar].split()
else:
default = os.environ[envvar]
kwargs["help"] = (
kwargs["help"] + f" (default from environment: {default})"
)
if required and default:
required = False
super(EnvDefault, self).__init__(
default=default, required=required, nargs=nargs, **kwargs
)
def __call__(self, parser, namespace, values, option_string=None): # type: ignore
setattr(namespace, self.dest, values)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
arg_parser.add_argument(
"-K",
"--keep-vm-state",
help="re-use a VM state coming from a previous run",
action="store_true",
)
arg_parser.add_argument(
"-I",
"--interactive",
help="drop into a python repl and run the tests interactively",
action="store_true",
)
arg_parser.add_argument(
"--start-scripts",
metavar="START-SCRIPT",
action=EnvDefault,
envvar="startScripts",
nargs="*",
help="start scripts for participating virtual machines",
)
arg_parser.add_argument(
"--vlans",
metavar="VLAN",
action=EnvDefault,
envvar="vlans",
nargs="*",
help="vlans to span by the driver",
)
arg_parser.add_argument(
"testscript",
action=EnvDefault,
envvar="testScript",
help="the test script to run",
type=Path,
)
args = arg_parser.parse_args()
if not args.keep_vm_state:
rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")
with Driver(
args.start_scripts, args.vlans, args.testscript.read_text(), args.keep_vm_state
) as driver:
if args.interactive:
ptpython.repl.embed(driver.test_symbols(), {})
else:
tic = time.time()
driver.run_tests()
toc = time.time()
rootlog.info(f"test script finished in {(toc-tic):.2f}s")
|
digitalocean-cluster-manager.py
|
import time
import threading
import http.server
import random
import json
import digitalocean
SECRET = '[REDACTED]'
BEARER = '[REDACTED]'
manager = digitalocean.Manager(token=BEARER)
keys = manager.get_all_sshkeys()
baseApi = digitalocean.baseapi.BaseAPI(token=manager.token)
projects = manager.get_all_projects()
project = [project for project in projects if project.name == 'TBP'][0]
lbs = manager.get_all_load_balancers()
lb = [lb for lb in lbs if lb.name == 'TBP'][0]
droplet_ids = set(lb.droplet_ids)
droplets = [manager.get_droplet(droplet_id) for droplet_id in droplet_ids]
main_droplet = [droplet for droplet in droplets if 'main' in droplet.tags][0]
def get_lb_size():
lb.load()
return len(lb.droplet_ids)
def wait_for_droplet(droplet):
while True:
actions = droplet.get_actions()
count = 0
for action in actions:
if action.status == 'completed':
count += 1
continue
action.load()
if action.status == 'completed':
count += 1
else:
break
if count == len(actions):
return
time.sleep(2)
def add_droplets(num=1):
snapshots = manager.get_all_snapshots()
snapshots = [snapshot for snapshot in snapshots if snapshot.name == 'tbp-snapshot']
snapshots.sort(key=lambda x: x.created_at, reverse=True)
snapshot = snapshots[0]
tag = digitalocean.Tag(token=manager.token, name='lb')
tag.create()
droplets = []
for _ in range(num):
droplet = digitalocean.Droplet(token=manager.token,
region=main_droplet.region['slug'],
size_slug='s-1vcpu-2gb',
ssh_keys=keys,
vpc_uuid=main_droplet.vpc_uuid,
image=snapshot.id,
name='tbp-worker',
monitoring=True,
backup=False)
droplet.create()
droplets.append(droplet)
droplet_ids.add(droplet.id)
tag.add_droplets([droplet.id for droplet in droplets])
return droplets
def remove_droplets(num=1):
worker_ids = set([droplet_id for droplet_id in droplet_ids if droplet_id != main_droplet.id])
for _ in range(num):
if len(worker_ids) == 0:
return
        worker_id = random.sample(list(worker_ids), 1)[0]
        # don't pick the same worker twice within this call
        worker_ids.discard(worker_id)
def remove_droplet(worker_id):
print('Delete droplet', worker_id)
worker = manager.get_droplet(worker_id)
wait_for_droplet(worker)
droplet_ids.remove(worker_id)
worker.destroy()
threading.Thread(target=remove_droplet, args=(worker_id,)).start()
def get_cpu_usage(droplet):
timestamp = int(time.time())
res = baseApi.get_data(url='monitoring/metrics/droplet/cpu',
type=digitalocean.baseapi.GET,
params={
'host_id': droplet.id,
'start': timestamp - 61,
'end': timestamp
})
res = res['data']['result']
prev_metrics = {}
metrics = {}
for r in res:
prev_metrics[r['metric']['mode']] = float(r['values'][0][1])
metrics[r['metric']['mode']] = float(r['values'][1][1])
def get_stats(metrics):
idle = metrics['idle'] + metrics['iowait']
non_idle = metrics['user'] + metrics['nice'] + metrics['system'] + \
metrics['irq'] + metrics['softirq'] + metrics['steal']
return idle, non_idle
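    # Utilization over the sampling window: take the change in the idle and
    # non-idle counters between the two returned samples and report the
    # non-idle share of that delta.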
prev_idle, prev_non_idle = get_stats(prev_metrics)
idle, non_idle = get_stats(metrics)
idle -= prev_idle
non_idle -= prev_non_idle
if idle + non_idle == 0:
return 0
return non_idle / (idle + non_idle)
def listen():
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
try:
length = int(self.headers['Content-Length'])
body = json.loads(self.rfile.read(length).decode('utf-8'))
if 'upperBound' not in body or 'lowerBound' not in body or \
'secret' not in body or body['secret'] != SECRET:
self.send_response(400, 'Wrong request.')
else:
with open('do-manage', 'w') as fs:
fs.write(str(body['lowerBound']) + ',' + str(body['upperBound']))
self.send_response(201, 'Yay.')
self.end_headers()
except Exception:
return
server_address = ('0.0.0.0', 1885)
httpd = http.server.HTTPServer(server_address, RequestHandler)
httpd.serve_forever()
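# Illustrative request to adjust the scaling bounds (host is whatever machine
# runs this script; the secret must match SECRET above):
#   curl -X POST http://<manager-host>:1885 \
#        -d '{"secret": "<SECRET>", "lowerBound": 2, "upperBound": 6}'
# The handler persists the bounds to the 'do-manage' file, which the main loop
# re-reads via get_bounds() on every iteration.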
def get_bounds():
try:
with open('do-manage') as fs:
text = fs.readline()
parts = text.strip().split(',')
lower, upper = int(parts[0]), int(parts[1])
if upper < lower:
return 1, -1
return lower, upper
except Exception as e:
print(e)
return 1, -1
if __name__ == "__main__":
threading.Thread(target=listen).start()
while True:
lb.load()
droplet_ids = set(lb.droplet_ids)
cpu_usage = get_cpu_usage(main_droplet)
print('CPU Usage:', cpu_usage)
        if cpu_usage > .8:
            target_droplet_count = len(droplet_ids) + 2
        elif cpu_usage < .5:
            target_droplet_count = len(droplet_ids) - 1
        else:
            # keep the current size when utilization is in the comfortable band
            target_droplet_count = len(droplet_ids)
lower, upper = get_bounds()
if upper > 0:
target_droplet_count = min(target_droplet_count, upper)
target_droplet_count = max(target_droplet_count, lower)
diff = target_droplet_count - len(droplet_ids)
if diff < 0:
print('Removing', -diff, 'droplets')
remove_droplets(num=-diff)
time.sleep(120)
elif diff > 0:
print('Adding', diff, 'droplets')
add_droplets(num=diff)
time.sleep(180)
else:
print(len(droplet_ids), 'droplets running')
time.sleep(10)
|
webcore.py
|
# Copyright 2011,2012,2018 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Webcore is a basic web server framework based on the SocketServer-based
BaseHTTPServer that comes with Python. The big difference is that this
one can carve up URL-space by prefix, such that "/foo/*" gets handled by
a different request handler than "/bar/*". I refer to this as "splitting".
You should also be able to make a request handler written without splitting
run under Webcore. This may not work for all request handlers, but it
definitely works for some. :) The easiest way to do this is with the
wrapRequestHandler() function, like so:
from CGIHTTPServer import CGIHTTPRequestHandler as CHRH
core.WebServer.set_handler("/foo", wrapRequestHandler(CHRH))
.. now URLs under the /foo/ directory will let you browse through the
filesystem next to pox.py. If you create a cgi-bin directory next to
pox.py, you'll be able to run executables in it.
For this specific purpose, there's actually a SplitCGIRequestHandler
which demonstrates wrapping a normal request handler while also
customizing it a bit -- SplitCGIRequestHandler shoehorns in functionality
to use arbitrary base paths.
BaseHTTPServer is not very fast and needs to run on its own thread.
It'd actually be great to have a version of this written against, say,
CherryPy, but I did want to include a simple, dependency-free web solution.
"""
from socketserver import ThreadingMixIn
from http.server import *
from time import sleep
import select
import threading
from .authentication import BasicAuthMixin
from pox.core import core
from pox.lib.revent import Event, EventMixin
import os
import socket
import posixpath
import urllib.request, urllib.parse, urllib.error
import cgi
import errno
from io import StringIO, BytesIO
log = core.getLogger()
try:
weblog = log.getChild("server")
except:
# I'm tired of people running Python 2.6 having problems with this.
#TODO: Remove this someday.
weblog = core.getLogger("webcore.server")
def _setAttribs (parent, child):
attrs = ['command', 'request_version', 'close_connection',
'raw_requestline', 'requestline', 'path', 'headers', 'wfile',
'rfile', 'server', 'client_address', 'connection', 'request']
for a in attrs:
setattr(child, a, getattr(parent, a))
setattr(child, 'parent', parent)
import weakref
class ShutdownHelper (object):
"""
Shuts down sockets for reading when POX goes down
Modern browsers may open (or leave open) HTTP connections without sending
a request for quite a while. Python's webserver will open requests for
these which will then just block at the readline() in handle_one_request().
The downside here is that when POX tries to shut down, those threads are
left hanging. We could change things so that it didn't just blindly call
and block on readline. Or we could make the handler threads daemon threads.
But instead, we just keep track of the sockets. When POX wants to go down,
we'll shutdown() the sockets for reading, which will get readline() unstuck
and let POX close cleanly.
"""
sockets = None
def __init__ (self):
core.add_listener(self._handle_GoingDownEvent)
def _handle_GoingDownEvent (self, event):
if self.sockets is None: return
cc = dict(self.sockets)
self.sockets.clear()
#if cc: log.debug("Shutting down %s socket(s)", len(cc))
for s,(r,w,c) in cc.items():
try:
if r and w: flags = socket.SHUT_RDWR
elif r: flags = socket.SHUT_RD
elif w: flags = socket.SHUT_WR
if r or w: s.shutdown(flags)
except Exception as e:
pass
if c:
try:
s.close()
except Exception:
pass
if cc: log.debug("Shut down %s socket(s)", len(cc))
def register (self, socket, read=True, write=False, close=False):
if self.sockets is None:
self.sockets = weakref.WeakKeyDictionary()
self.sockets[socket] = (read,write,close)
def unregister (self, socket):
if self.sockets is None: return
try:
del self.sockets[socket]
except Exception as e:
pass
_shutdown_helper = ShutdownHelper()
from http.cookies import SimpleCookie
POX_COOKIEGUARD_DEFAULT_COOKIE_NAME = "POXCookieGuardCookie"
def _gen_cgc ():
#TODO: Use Python 3 secrets module
import random
import datetime
import hashlib
try:
rng = random.SystemRandom()
except Exception:
log.error("Using insecure pseudorandom number for POX CookieGuard")
rng = random.Random()
data = "".join([str(rng.randint(0,9)) for _ in range(1024)])
data += str(datetime.datetime.now())
data += str(id(data))
data = data.encode()
return hashlib.sha256(data).hexdigest()
import urllib
from urllib.parse import quote_plus, unquote_plus
class POXCookieGuardMixin (object):
"""
This is a CSRF mitigation we call POX CookieGuard. This only stops
CSRF with modern browsers, but has the benefit of not requiring
requesters to do anything particularly special. In particular, if you
are doing something like using curl from the commandline to call JSON-RPCs,
you don't need to do anything tricky like fetch an auth token and then
include it in the RPC -- all you need is cookie support. Basically this
works by having POX give you an authentication token in a cookie. This
uses SameSite=Strict so that other sites can't convince the browser to
send it.
"""
_pox_cookieguard_bouncer = "/_poxcookieguard/bounce"
_pox_cookieguard_secret = _gen_cgc()
_pox_cookieguard_cookie_name = POX_COOKIEGUARD_DEFAULT_COOKIE_NAME
_pox_cookieguard_consume_post = True
def _cookieguard_maybe_consume_post (self):
if self._pox_cookieguard_consume_post is False: return
if self.command != "POST": return
# Read rest of input to avoid connection reset
cgi.FieldStorage( fp = self.rfile, headers = self.headers,
environ={ 'REQUEST_METHOD':'POST' } )
def _get_cookieguard_cookie (self):
return self._pox_cookieguard_secret
def _get_cookieguard_cookie_path (self, requested):
"""
Gets the path to be used for the cookie
"""
return "/"
def _do_cookieguard_explict_continuation (self, requested, target):
"""
Sends explicit continuation page
"""
log.debug("POX CookieGuard bouncer doesn't have correct cookie; "
"Sending explicit continuation page")
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(("""
<html><head><title>POX CookieGuard</title></head>
<body>
A separate site has linked you here. If this was intentional,
please <a href="%s">continue to %s</a>.
</body>
</html>
""" % (target, cgi.escape(target))).encode())
def _do_cookieguard_set_cookie (self, requested, bad_cookie):
"""
Sets the cookie and redirects
bad_cookie is True if the cookie was set but is wrong.
"""
self._cookieguard_maybe_consume_post()
self.send_response(307, "Temporary Redirect")
#TODO: Set Secure automatically if being accessed by https.
#TODO: Set Path cookie attribute
self.send_header("Set-Cookie",
"%s=%s; SameSite=Strict; HttpOnly; path=%s"
% (self._pox_cookieguard_cookie_name,
self._get_cookieguard_cookie(),
self._get_cookieguard_cookie_path(requested)))
self.send_header("Location", self._pox_cookieguard_bouncer + "?"
+ quote_plus(requested))
self.end_headers()
def _do_cookieguard (self, override=None):
do_cg = override
if do_cg is None: do_cg = getattr(self, 'pox_cookieguard', True)
if not do_cg: return True
requested = self.raw_requestline.split()[1].decode("latin-1")
cookies = SimpleCookie(self.headers.get('Cookie'))
cgc = cookies.get(self._pox_cookieguard_cookie_name)
if cgc and cgc.value == self._get_cookieguard_cookie():
if requested.startswith(self._pox_cookieguard_bouncer + "?"):
log.debug("POX CookieGuard cookie is valid -- bouncing")
qs = requested.split("?",1)[1]
self._cookieguard_maybe_consume_post()
self.send_response(307, "Temporary Redirect")
self.send_header("Location", unquote_plus(qs))
self.end_headers()
return False
log.debug("POX CookieGuard cookie is valid")
return True
else:
# No guard cookie or guard cookie is wrong
if requested.startswith(self._pox_cookieguard_bouncer + "?"):
# Client probably didn't save cookie
qs = requested.split("?",1)[1]
target = unquote_plus(qs)
bad_qs = quote_plus(target) != qs
if bad_qs or self.command != "GET":
log.warn("Bad POX CookieGuard bounce; possible attack "
"(method:%s cookie:%s qs:%s)",
self.command,
"bad" if cgc else "missing",
"bad" if bad_qs else "okay")
self.send_response(400, "Bad Request")
self.end_headers()
return False
self._do_cookieguard_explict_continuation(requested, target)
return False
if cgc:
log.debug("POX CookieGuard got wrong cookie -- setting new one")
else:
log.debug("POX CookieGuard got no cookie -- setting one")
self._do_cookieguard_set_cookie(requested, bool(cgc))
return False
import http.server
from http.server import SimpleHTTPRequestHandler
class SplitRequestHandler (BaseHTTPRequestHandler):
"""
To write HTTP handlers for POX, inherit from this class instead of
BaseHTTPRequestHandler. The interface should be the same -- the same
variables should be set, and the same do_GET(), etc. methods should
be called.
In addition, there will be a self.args which can be specified
when you set_handler() on the server.
"""
# Also a StreamRequestHandler
def __init__ (self, parent, prefix, args):
_setAttribs(parent, self)
self.parent = parent
self.args = args
self.prefix = prefix
self._init()
def _init (self):
"""
This is called by __init__ during initialization. You can
override it to, for example, parse .args.
"""
pass
@classmethod
def format_info (cls, args):
"""
Get an info string about this handler
This is displayed, for example, in the "Web Prefixes" list of the default
POX web server page.
"""
def shorten (s, length=100):
s = str(s)
if len(s) > length: s = s[:length] + "..."
return s
return shorten(str(args))
def version_string (self):
return "POX/%s(%s) %s" % (".".join(map(str, core.version)),
core.version_name,
BaseHTTPRequestHandler.version_string(self))
def handle_one_request (self):
raise RuntimeError("Not supported")
def handle(self):
raise RuntimeError("Not supported")
def _split_dispatch (self, command, handler = None):
if handler is None: handler = self
mname = 'do_' + self.command
if not hasattr(handler, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(handler, mname)
return method()
def log_request (self, code = '-', size = '-'):
weblog.debug(self.prefix + (':"%s" %s %s' %
(self.requestline, str(code), str(size))))
def log_error (self, fmt, *args):
weblog.error(self.prefix + ':' + (fmt % args))
def log_message (self, fmt, *args):
weblog.info(self.prefix + ':' + (fmt % args))
_favicon = ("47494638396110001000c206006a5797927bc18f83ada9a1bfb49ceabda"
+ "4f4ffffffffffff21f904010a0007002c000000001000100000034578badcfe30b20"
+ "1c038d4e27a0f2004e081e2172a4051942abba260309ea6b805ab501581ae3129d90"
+ "1275c6404b80a72f5abcd4a2454cb334dbd9e58e74693b97425e07002003b")
_favicon = bytes(int(_favicon[n:n+2],16)
for n in range(0,len(_favicon),2))
class CoreHandler (SplitRequestHandler):
"""
A default page to say hi from POX.
"""
def do_GET (self):
"""Serve a GET request."""
self.do_content(True)
def do_HEAD (self):
"""Serve a HEAD request."""
self.do_content(False)
def do_content (self, is_get):
if self.path == "/":
self.send_info(is_get)
elif self.path.startswith("/favicon."):
self.send_favicon(is_get)
else:
self.send_error(404, "File not found on CoreHandler")
def send_favicon (self, is_get = False):
self.send_response(200)
self.send_header("Content-type", "image/gif")
self.send_header("Content-Length", str(len(_favicon)))
self.end_headers()
if is_get:
self.wfile.write(_favicon)
def send_info (self, is_get = False):
r = "<html><head><title>POX</title></head>\n"
r += "<body>\n<h1>POX Webserver</h1>\n<h2>Components</h2>\n"
r += "<ul>"
for k in sorted(core.components):
v = core.components[k]
r += "<li>%s - %s</li>\n" % (cgi.escape(str(k)), cgi.escape(str(v)))
r += "</ul>\n\n<h2>Web Prefixes</h2>"
r += "<ul>"
m = [list(map(cgi.escape, map(str, [x[0],x[1],x[1].format_info(x[3])])))
for x in self.args.matches]
m.sort()
for v in m:
r += "<li><a href='{0}'>{0}</a> - {1} {2}</li>\n".format(*v)
r += "</ul></body></html>\n"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(len(r)))
self.end_headers()
if is_get:
self.wfile.write(r.encode())
class StaticContentHandler (SplitRequestHandler, SimpleHTTPRequestHandler):
"""
A SplitRequestHandler for serving static content
This is largely the same as the Python SimpleHTTPRequestHandler, but
we modify it to serve from arbitrary directories at arbitrary
positions in the URL space.
"""
server_version = "StaticContentHandler/1.0"
def send_head (self):
# We override this and handle the directory redirection case because
# we want to include the per-split prefix.
path = self.translate_path(self.path)
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(302)
self.send_header("Location", self.prefix + self.path + "/")
self.end_headers()
return None
return SimpleHTTPRequestHandler.send_head(self)
def list_directory (self, dirpath):
# dirpath is an OS path
try:
d = os.listdir(dirpath)
except OSError as e:
if e.errno == errno.EACCES:
self.send_error(403, "This directory is not listable")
elif e.errno == errno.ENOENT:
self.send_error(404, "This directory does not exist")
else:
self.send_error(400, "Unknown error")
return None
d.sort(key=str.lower)
r = StringIO()
r.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
path = posixpath.join(self.prefix, cgi.escape(self.path).lstrip("/"))
r.write("<html><head><title>" + path + "</title></head>\n")
r.write("<body><pre>")
parts = path.rstrip("/").split("/")
r.write('<a href="/">/</a>')
for i,part in enumerate(parts):
link = urllib.parse.quote("/".join(parts[:i+1]))
if i > 0: part += "/"
r.write('<a href="%s">%s</a>' % (link, cgi.escape(part)))
r.write("\n" + "-" * (0+len(path)) + "\n")
dirs = []
files = []
for f in d:
if f.startswith("."): continue
if os.path.isdir(os.path.join(dirpath, f)):
dirs.append(f)
else:
files.append(f)
def entry (n, rest=''):
link = urllib.parse.quote(n)
name = cgi.escape(n)
r.write('<a href="%s">%s</a>\n' % (link,name+rest))
for f in dirs:
entry(f, "/")
for f in files:
entry(f)
r.write("</pre></body></html>")
r.seek(0)
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(r.getvalue())))
self.end_headers()
return BytesIO(r.read().encode())
def translate_path (self, path, include_prefix = True):
"""
Translate a web-path to a local filesystem path
Odd path elements (e.g., ones that contain local filesystem path
separators) are stripped.
"""
def fixpath (p):
o = []
skip = 0
while True:
p,tail = posixpath.split(p)
if p in ('/','') and tail == '': break
if tail in ('','.', os.path.curdir, os.path.pardir): continue
if os.path.sep in tail: continue
if os.path.altsep and os.path.altsep in tail: continue
if os.path.splitdrive(tail)[0] != '': continue
if tail == '..':
skip += 1
continue
if skip:
skip -= 1
continue
o.append(tail)
o.reverse()
return o
# Remove query string / fragment
if "?" in path: path = path[:path.index("?")]
if "#" in path: path = path[:path.index("#")]
path = fixpath(path)
if path:
path = os.path.join(*path)
else:
path = ''
if include_prefix:
path = os.path.join(os.path.abspath(self.args['root']), path)
return path
def wrapRequestHandler (handlerClass):
return type("Split" + handlerClass.__name__,
(SplitRequestHandler, handlerClass, object), {})
from http.server import CGIHTTPRequestHandler
class SplitCGIRequestHandler (SplitRequestHandler,
CGIHTTPRequestHandler, object):
"""
Runs CGIRequestHandler serving from an arbitrary path.
This really should be a feature of CGIRequestHandler and the way of
implementing it here is scary and awful, but it at least sort of works.
"""
__lock = threading.Lock()
def _split_dispatch (self, command):
with self.__lock:
olddir = os.getcwd()
try:
os.chdir(self.args)
return SplitRequestHandler._split_dispatch(self, command)
finally:
os.chdir(olddir)
class SplitterRequestHandler (BaseHTTPRequestHandler, BasicAuthMixin,
POXCookieGuardMixin):
basic_auth_info = {} # username -> password
basic_auth_enabled = None
pox_cookieguard = True
def __init__ (self, *args, **kw):
if self.basic_auth_info:
self.basic_auth_enabled = True
#self.rec = Recording(args[0])
#self.args = args
#self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
#BaseHTTPRequestHandler.__init__(self, self.rec, *args[1:], **kw)
try:
BaseHTTPRequestHandler.__init__(self, *args, **kw)
except socket.error as e:
if e.errno == errno.EPIPE:
weblog.warn("Broken pipe (unclean client disconnect?)")
else:
raise
finally:
_shutdown_helper.unregister(self.connection)
def log_request (self, code = '-', size = '-'):
weblog.debug('splitter:"%s" %s %s',
self.requestline, str(code), str(size))
def log_error (self, fmt, *args):
weblog.error('splitter:' + fmt % args)
def log_message (self, fmt, *args):
weblog.info('splitter:' + fmt % args)
def version_string (self):
return "POX/%s(%s) %s" % (".".join(map(str, core.version)),
core.version_name,
BaseHTTPRequestHandler.version_string(self))
def _check_basic_auth (self, user, password):
if self.basic_auth_info.get(user) == password: return True
import web.authentication
web.authentication.log.warn("Authentication failure")
return False
def _get_auth_realm (self):
return "POX"
def handle_one_request(self):
_shutdown_helper.register(self.connection)
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
if not self._do_auth(): return
handler = None
while True:
for m in self.server.matches:
if self.path.startswith(m[0]):
#print m,self.path
handler = m[1](self, m[0], m[3])
#pb = self.rec.getPlayback()
#handler = m[1](pb, *self.args[1:])
_setAttribs(self, handler)
if m[2]:
# Trim. Behavior is not "perfect"
handler.path = self.path[len(m[0]):]
if m[0].endswith('/'):
handler.path = '/' + handler.path
break
if handler is None:
handler = self
if not self.path.endswith('/'):
# Handle splits like directories
self.send_response(302)
self.send_header("Location", self.path + "/")
self.end_headers()
break
break
override_cg = getattr(handler, "pox_cookieguard", None)
if not self._do_cookieguard(override_cg): return
event = WebRequest(self, handler)
self.server.raiseEventNoErrors(event)
if event.handler:
return event.handler._split_dispatch(self.command)
class WebRequest (Event):
"""
Hook for requests on the POX web server.
This event is fired when the webserver is going to handle a request.
The listener can modify the .handler to change how the event is
handled. Or it can just be used to spy on requests.
If the handler is the splitter itself, then the page wasn't found.
"""
splitter = None
handler = None
def __init__ (self, splitter, handler):
self.splitter = splitter
self.handler = handler
def set_handler (self, handler_class):
"""
Set a new handler class
"""
h = self.handler
self.handler = handler_class(h.parent, h.prefix, h.args)
class SplitThreadedServer(ThreadingMixIn, HTTPServer, EventMixin):
_eventMixin_events = set([WebRequest])
matches = [] # Tuples of (prefix, handler, trim_prefix, args)
def __init__ (self, *args, **kw):
self.matches = list(self.matches)
self.ssl_server_key = kw.pop("ssl_server_key", None)
self.ssl_server_cert = kw.pop("ssl_server_cert", None)
self.ssl_client_certs = kw.pop("ssl_client_certs", None)
HTTPServer.__init__(self, *args, **kw)
# self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
self.ssl_enabled = False
if self.ssl_server_key or self.ssl_server_cert or self.ssl_client_certs:
import ssl
# The Python SSL stuff being used this way means that failing to set up
# SSL can hang a connection open, which is annoying if you're trying to
# shut down POX. Do something about this later.
cert_reqs = ssl.CERT_REQUIRED
if self.ssl_client_certs is None:
cert_reqs = ssl.CERT_NONE
self.socket = ssl.wrap_socket(self.socket, server_side=True,
keyfile = self.ssl_server_key, certfile = self.ssl_server_cert,
ca_certs = self.ssl_client_certs, cert_reqs = cert_reqs,
do_handshake_on_connect = True,
ssl_version = ssl.PROTOCOL_TLSv1_2,
suppress_ragged_eofs = True)
self.ssl_enabled = True
def set_handler (self, prefix, handler, args = None, trim_prefix = True):
# Not very efficient
assert (handler is None) or (issubclass(handler, SplitRequestHandler))
self.matches = [m for m in self.matches if m[0] != prefix]
if handler is None: return
self.matches.append((prefix, handler, trim_prefix, args))
self.matches.sort(key=lambda e:len(e[0]),reverse=True)
def add_static_dir (self, www_path, local_path=None, relative=False):
"""
Serves a directory of static content.
www_path is the prefix of the URL that maps to this directory.
local_path is the directory to serve content from. If it's not
specified, it is assumed to be a directory with the same name as
www_path.
relative, if True, means that the local path is to be a sibling
of the calling module.
For an example, see the launch() function in this module.
"""
if not www_path.startswith('/'): www_path = '/' + www_path
if local_path is None:
local_path = www_path[1:]
if relative:
local_path = os.path.basename(local_path)
if relative:
import inspect
path = inspect.stack()[1][1]
path = os.path.dirname(path)
local_path = os.path.join(path, local_path)
local_path = os.path.abspath(local_path)
log.debug("Serving %s at %s", local_path, www_path)
self.set_handler(www_path, StaticContentHandler,
{'root':local_path}, True)
class InternalContentHandler (SplitRequestHandler):
"""
Serves data from inside the application, without backing files
When it receives a GET or a HEAD, it translates the path from something
like "/foo/bar.txt" to "foo__bar_txt". It then tries several things:
1) Looking up an attribute on the handler called "GET_foo__bar_txt".
2) Treating self.args as a dictionary and looking for
self.args["/foo/bar.txt"].
3) Looking on self.args for an attribute called "GET_foo__bar_txt".
4) Looking up an attribute on the handler called "GETANY".
5) Looking up the key self.args[None].
6) Looking up the attribute "GETANY" on self.args.
Whichever of these it gets, if the result is callable, it calls it,
passing the request itself as the argument (so if the thing is a
method, it'll essentially just be self twice).
The attribute or return value is ideally a tuple of (mime-type, bytes,
headers). You may omit the headers. If you include it, it can either
be a dictionary or a list of name/value pairs. If you return a string
or bytes instead of such a tuple, it'll try to guess between JSON,
HTML, and plain text. It'll then send that to the client. Easy!
When a handler is set up with set_handler(), the third argument becomes
self.args on the request. So that lets you put data into an
InternalContentHandler without subclassing. Or just subclass it.
For step 2 above, it will also look up the given path plus a slash. If
it finds it, it'll do an HTTP redirect to it. In this way, you can
provide things which look like directories by including the slashed
versions in the dictionary.
"""
args_content_lookup = True # Set to false to disable lookup on .args
def do_GET (self):
self.do_response(True)
def do_HEAD (self):
self.do_response(False)
def do_response (self, is_get):
path = "<Unknown>"
try:
path = self.path.lstrip("/").replace("/","__").replace(".","_")
r = getattr(self, "GET_" + path, None)
if r is None and self.args is not None and self.args_content_lookup:
try:
r = self.args[self.path]
except Exception:
try:
dummy = self.args[self.path + "/"]
# Ahh... directory without trailing slash. Let's redirect.
self.send_response(302, "Redirect to directory")
self.send_header('Location', self.parent.path + '/')
self.end_headers()
return
except Exception:
pass
if r is None:
r = getattr(self.args, "GET_" + path, None)
if r is None:
r = getattr(self, "GETANY", None)
if r is None and self.args is not None:
try:
r = self.args[None]
except Exception:
pass
if r is None:
r = getattr(self.args, "GETANY", None)
if callable(r):
r = r(self)
if r is None:
self.send_error(404, "File not found")
return
response_headers = []
if len(r) >= 2 and len(r) <= 3 and not isinstance(r, (str,bytes)):
ct = r[0]
if len(r) >= 3:
response_headers = r[2]
r = r[1]
else:
if isinstance(r, str): r = r.encode()
if r.lstrip().startswith(b'{') and r.rstrip().endswith(b'}'):
ct = "application/json"
elif b"<html" in r[:255]:
ct = "text/html"
else:
ct = "text/plain"
if isinstance(r, str): r = r.encode()
except Exception as exc:
self.send_error(500, "Internal server error")
msg = "%s failed trying to get '%s'" % (type(self).__name__, path)
if str(exc): msg += ": " + str(exc)
log.debug(msg)
return
self.send_response(200)
self.send_header("Content-type", ct)
self.send_header("Content-Length", str(len(r)))
if isinstance(response_headers, dict):
response_headers = list(response_headers.items())
for hname,hval in response_headers:
self.send_header(hname, hval)
self.end_headers()
if is_get:
self.wfile.write(r)
class FileUploadHandler (SplitRequestHandler):
"""
A simple form for uploading a file to POX.
"""
def do_GET (self):
"""Serve a GET request."""
self.send_form(True)
def do_HEAD (self):
"""Serve a HEAD request."""
self.send_form(False)
def send_form (self, is_get = False, msg = None):
r = "<html><head><title>POX</title></head>\n"
r += "<body>\n<h1>POX File Upload</h1>\n"
if msg:
r += msg
r += "\n<hr />\n"
r += "<form method='POST' enctype='multipart/form-data' action='?'>\n"
r += "File to upload: <input type='file' name='upload'>\n"
r += "<input type='submit' value='Upload!' /></form>\n"
r += "</body></html>\n"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(len(r)))
self.end_headers()
if is_get:
self.wfile.write(r.encode())
def do_POST (self):
mime,params = cgi.parse_header(self.headers.get('content-type'))
if mime != 'multipart/form-data':
self.send_error(400, "Expected form data")
return
#query = cgi.parse_multipart(self.rfile, params)
#data = query.get("upload")
data = cgi.FieldStorage( fp = self.rfile, headers = self.headers,
environ={ 'REQUEST_METHOD':'POST' } )
if not data or "upload" not in data:
self.send_error(400, "Expected upload data")
return
uploadfield = data["upload"]
msg = self.on_upload(uploadfield.filename, uploadfield.file)
self.send_form(True, msg=msg)
def on_upload (self, filename, datafile):
data = datafile.read()
import hashlib
h = hashlib.md5()
h.update(data)
hc = h.hexdigest()
msg = "Received file '%s'. bytes:%s md5:%s" % (filename, len(data), hc)
log.warn(msg)
return msg
def upload_test (save=False):
"""
Launch a file upload test
--save will save the file using its MD5 for the filename
"""
class SaveUploader (FileUploadHandler):
def on_upload (self, filename, datafile):
import io
data = datafile.read()
datafile = io.BytesIO(data)
ret = super().on_upload(filename, datafile)
import hashlib
h = hashlib.md5()
h.update(data)
h = h.hexdigest().upper()
with open("FILE_UPLOAD_" + h, "wb") as f:
f.write(data)
return ret
handler = SaveUploader if save else FileUploadHandler
core.WebServer.set_handler("/upload_test", handler)
def launch (address='', port=8000, static=False, ssl_server_key=None,
ssl_server_cert=None, ssl_client_certs=None,
no_cookieguard=False):
"""
Starts a POX webserver
--ssl_client_certs is a CA certificate file used to verify the client
certificates which the browser supplies, basically in order to authorize
the client. This is much more secure than just using HTTP authentication.
--static alone enables serving static content from POX's www_root
directory. Otherwise it is a comma-separated list of prefix:path
pairs to serve (that is, each path is served at its prefix). If
there is no colon, the path and prefix are assumed to be the same.
If one of the pairs is empty, www_root is also served.
--no-cookieguard disables POX CookieGuard. See POXCookieGuardMixin
documentation for more on this, but the short story is that disabling
it will make your server much more vulnerable to CSRF attacks.
"""
if no_cookieguard:
SplitterRequestHandler.pox_cookieguard = False
assert no_cookieguard is True, "--no-cookieguard takes no argument"
def expand (f):
if isinstance(f, str): return os.path.expanduser(f)
return f
ssl_server_key = expand(ssl_server_key)
ssl_server_cert = expand(ssl_server_cert)
ssl_client_certs = expand(ssl_client_certs)
httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler,
ssl_server_key=ssl_server_key,
ssl_server_cert=ssl_server_cert,
ssl_client_certs=ssl_client_certs)
core.register("WebServer", httpd)
httpd.set_handler("/", CoreHandler, httpd, True)
#httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
#httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
#httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")
if static is True:
httpd.add_static_dir('static', 'www_root', relative=True)
elif static is False:
pass
else:
static = static.split(",")
for entry in static:
if entry.lower() == "":
httpd.add_static_dir('static', 'www_root', relative=True)
continue
if ':' not in entry:
directory = entry
prefix = os.path.split(directory)
if prefix[1] == '':
prefix = os.path.split(prefix[0])
prefix = prefix[1]
assert prefix != ''
else:
prefix,directory = entry.split(":")
directory = os.path.expanduser(directory)
httpd.add_static_dir(prefix, directory, relative=False)
def run ():
try:
msg = "https" if httpd.ssl_enabled else "http"
msg += "://%s:%i" % httpd.socket.getsockname()
log.info("Listening at " + msg)
httpd.serve_forever()
except:
pass
log.info("Server quit")
def go_up (event):
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
def go_down (event):
httpd.shutdown()
core.addListenerByName("GoingUpEvent", go_up)
core.addListenerByName("GoingDownEvent", go_down)
|
test_backfill_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import logging
import threading
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.exceptions import (
AirflowException,
AirflowTaskTimeout,
BackfillUnfinished,
DagConcurrencyLimitReached,
NoAvailablePoolSlot,
TaskConcurrencyLimitReached,
)
from airflow.jobs.backfill_job import BackfillJob
from airflow.models import DagBag, Pool, TaskInstance as TI
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstanceKey
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_dags, clear_db_pools, clear_db_runs, set_default_pool_slots
from tests.test_utils.mock_executor import MockExecutor
from tests.test_utils.timetables import cron_timetable
logger = logging.getLogger(__name__)
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@pytest.fixture(scope="module")
def dag_bag():
return DagBag(include_examples=True)
class TestBackfillJob:
@staticmethod
def clean_db():
clear_db_dags()
clear_db_runs()
clear_db_pools()
@pytest.fixture(autouse=True)
def set_instance_attrs(self, dag_bag):
self.clean_db()
self.parser = cli_parser.get_parser()
self.dagbag = dag_bag
def _get_dummy_dag(
self,
dag_maker_fixture,
dag_id='test_dag',
pool=Pool.DEFAULT_POOL_NAME,
max_active_tis_per_dag=None,
task_id='op',
**kwargs,
):
with dag_maker_fixture(dag_id=dag_id, schedule_interval='@daily', **kwargs) as dag:
DummyOperator(task_id=task_id, pool=pool, max_active_tis_per_dag=max_active_tis_per_dag)
return dag
def _times_called_with(self, method, class_):
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
def test_unfinished_dag_runs_set_to_failed(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.FAILED == dag_run.state
def test_dag_run_with_finished_tasks_set_to_success(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun()
for ti in dag_run.get_task_instances():
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.SUCCESS == dag_run.state
@pytest.mark.xfail(condition=True, reason="This test is flaky")
@pytest.mark.backend("postgres", "mysql")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
target_dag.sync_to_db()
# dag_file_processor = DagFileProcessor(dag_ids=[], log=Mock())
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert not task_instances_list
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_first_depends_on_past=True
)
job.run()
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert task_instances_list
@pytest.mark.backend("postgres", "mysql")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('miscellaneous_test_dag')
end_date = DEFAULT_DATE + datetime.timedelta(days=1)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=end_date,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
expected_execution_order = [
("runme_0", DEFAULT_DATE),
("runme_1", DEFAULT_DATE),
("runme_2", DEFAULT_DATE),
("runme_0", end_date),
("runme_1", end_date),
("runme_2", end_date),
("also_run_this", DEFAULT_DATE),
("also_run_this", end_date),
("run_after_loop", DEFAULT_DATE),
("run_after_loop", end_date),
("run_this_last", DEFAULT_DATE),
("run_this_last", end_date),
]
assert [
((dag.dag_id, task_id, f'backfill__{when.isoformat()}', 1), (State.SUCCESS, None))
for (task_id, when) in expected_execution_order
] == executor.sorted_tasks
session = settings.Session()
drs = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date).all()
assert drs[0].execution_date == DEFAULT_DATE
assert drs[0].state == State.SUCCESS
assert drs[1].execution_date == DEFAULT_DATE + datetime.timedelta(days=1)
assert drs[1].state == State.SUCCESS
dag.clear()
session.close()
@pytest.mark.backend("postgres", "mysql")
@pytest.mark.parametrize(
"dag_id, expected_execution_order",
[
[
"example_branch_operator",
(
"run_this_first",
"branching",
"branch_a",
"branch_b",
"branch_c",
"branch_d",
"follow_branch_a",
"follow_branch_b",
"follow_branch_c",
"follow_branch_d",
"join",
),
],
[
"miscellaneous_test_dag",
("runme_0", "runme_1", "runme_2", "also_run_this", "run_after_loop", "run_this_last"),
],
[
"example_skip_dag",
(
"always_true_1",
"always_true_2",
"skip_operator_1",
"skip_operator_2",
"all_success",
"one_success",
"final_1",
"final_2",
),
],
["latest_only", ("latest_only", "task1")],
],
)
def test_backfill_examples(self, dag_id, expected_execution_order):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful, not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
dag = self.dagbag.get_dag(dag_id)
logger.info('*** Running example DAG: %s', dag.dag_id)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
assert [
((dag_id, task_id, f'backfill__{DEFAULT_DATE.isoformat()}', 1), (State.SUCCESS, None))
for task_id in expected_execution_order
] == executor.sorted_tasks
def test_backfill_conf(self, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_conf')
dag_maker.create_dagrun()
executor = MockExecutor()
conf_ = json.loads("""{"key": "value"}""")
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf_,
)
job.run()
# We ignore the first dag_run created by fixture
dr = DagRun.find(
dag_id='test_backfill_conf', execution_start_date=DEFAULT_DATE + datetime.timedelta(days=1)
)
assert conf_ == dr[0].conf
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_max_active_tis_per_dag_limit(self, mock_log, dag_maker):
max_active_tis_per_dag = 2
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_max_active_tis_per_dag_limit',
max_active_tis_per_dag=max_active_tis_per_dag,
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
task_concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= max_active_tis_per_dag
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == max_active_tis_per_dag:
task_concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert task_concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_task_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_dag_concurrency_limit(self, mock_log, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_respect_concurrency_limit')
dag_maker.create_dagrun()
dag.max_active_tasks = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= dag.max_active_tasks
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.max_active_tasks:
concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_dag_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_default_pool_limit(self, mock_log, dag_maker):
default_pool_slots = 2
set_default_pool_slots(default_pool_slots)
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_with_no_pool_limit')
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
default_pool_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
# if no pool is specified, the number of tasks running in
# parallel per backfill should not exceed the
# default_pool slots at any point in time.
for running_task_instances in executor.history:
assert len(running_task_instances) <= default_pool_slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == default_pool_slots:
default_pool_task_slot_count_reached_at_least_once = True
assert 8 == num_running_task_instances
assert default_pool_task_slot_count_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_pool_not_found(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
# The backfill should fail because the referenced pool does not exist.
with pytest.raises(AirflowException):
job.run()
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_pool_limit(self, mock_log, dag_maker):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
assert 8 == num_running_task_instances
assert pool_was_full_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_task_concurrency_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_run_rescheduled(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_run_rescheduled", task_id="test_backfill_run_rescheduled_task-1"
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_failed_tasks(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_rerun_failed", task_id="test_backfill_rerun_failed_task-1"
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_succeeded_tasks(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_rerun_succeeded", task_id="test_backfill_rerun_succeeded_task-1"
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_succeeded_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_succeeded_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_succeeded_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_upstream_failed_tasks(self, dag_maker):
with dag_maker(dag_id='test_backfill_rerun_upstream_failed', schedule_interval='@daily') as dag:
op1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1')
op2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2')
op1.set_upstream(op2)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_failed_tasks_without_flag(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id='test_backfill_rerun_failed', task_id='test_backfill_rerun_failed_task-1'
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False,
)
with pytest.raises(AirflowException):
job.run()
def test_backfill_retry_intermittent_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_intermittent_failure_job',
schedule_interval="@daily",
default_args={
'retries': 2,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dag_maker.create_dagrun()
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=2)
] = State.UP_FOR_RETRY
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
def test_backfill_retry_always_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_always_failure_job',
schedule_interval="@daily",
default_args={
'retries': 1,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dr = dag_maker.create_dagrun()
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, dr.run_id, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_fail(dag.dag_id, task1.task_id, dr.run_id, try_number=2)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
)
with pytest.raises(BackfillUnfinished):
job.run()
def test_backfill_ordered_concurrent_execute(self, dag_maker):
with dag_maker(
dag_id='test_backfill_ordered_concurrent_execute',
schedule_interval="@daily",
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
runid0 = f'backfill__{DEFAULT_DATE.isoformat()}'
dag_maker.create_dagrun(run_id=runid0)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
runid1 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()}'
runid2 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=2)).isoformat()}'
# test executor history keeps a list
history = executor.history
assert [sorted(item[-1].key[1:3] for item in batch) for batch in history] == [
[
('leave1', runid0),
('leave1', runid1),
('leave1', runid2),
('leave2', runid0),
('leave2', runid1),
('leave2', runid2),
],
[('upstream_level_1', runid0), ('upstream_level_1', runid1), ('upstream_level_1', runid2)],
[('upstream_level_2', runid0), ('upstream_level_2', runid1), ('upstream_level_2', runid2)],
[('upstream_level_3', runid0), ('upstream_level_3', runid1), ('upstream_level_3', runid2)],
]
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
session.close()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
executor = MockExecutor(do_update=True)
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
# run with timeout because this creates an infinite loop if not
# caught
try:
with timeout(seconds=5):
job.run()
except AirflowTaskTimeout:
pass
ti = TI(task=dag.get_task('test_backfill_pooled_task'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
@pytest.mark.parametrize("ignore_depends_on_past", [True, False])
def test_backfill_depends_on_past_works_independently_on_ignore_depends_on_past(
self, ignore_depends_on_past
):
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
executor=MockExecutor(),
ignore_first_depends_on_past=ignore_depends_on_past,
).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_depends_on_past_backwards(self):
"""
Test that backfill honors the run_backwards (-B) option and raises when it interacts with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
kwargs = dict(
start_date=start_date,
end_date=end_date,
)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, ignore_first_depends_on_past=True, **kwargs)
job.run()
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
assert ti.state == State.SUCCESS
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: test_dop_task'
with pytest.raises(AirflowException, match=expected_msg):
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, run_backwards=True, **kwargs)
job.run()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay-on-limit argument is parsed correctly for the backfill command
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'dags',
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay-on-limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
assert 0.5 == parsed_args.delay_on_limit
def _get_dag_test_max_active_limits(
self, dag_maker_fixture, dag_id='test_dag', max_active_runs=1, **kwargs
):
with dag_maker_fixture(
dag_id=dag_id,
schedule_interval="@hourly",
max_active_runs=max_active_runs,
**kwargs,
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
return dag
def test_backfill_max_limit_check_within_limit(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_within_limit', max_active_runs=16
)
dag_maker.create_dagrun()
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
assert 2 == len(dagruns)
assert all(run.state == State.SUCCESS for run in dagruns)
def test_backfill_max_limit_check(self, dag_maker):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dag_run'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
# this session object is different than the one in the main thread
with create_session() as thread_session:
try:
dag = self._get_dag_test_max_active_limits(
dag_maker,
dag_id=dag_id,
)
dag_maker.create_dagrun(
# Existing dagrun that is not within the backfill range
run_id=run_id,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
)
thread_session.commit()
cond.notify()
finally:
cond.release()
thread_session.close()
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
backfill_job_thread = threading.Thread(
target=run_backfill, name="run_backfill", args=(dag_run_created_cond,)
)
dag_run_created_cond.acquire()
with create_session() as session:
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
assert 1 == len(dagruns)
dr = dagruns[0]
assert dr.run_id == run_id
# allow the backfill to execute
# by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
assert 3 == len(dagruns) # 2 from backfill + 1 existing
assert dagruns[-1].run_id == dr.run_id
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self, dag_maker):
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_no_count_existing'
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
assert 1 == len(dagruns)
assert State.SUCCESS == dagruns[0].state
def test_backfill_max_limit_check_complete_loop(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_complete_loop'
)
dag_maker.create_dagrun()
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
assert success_expected == success_dagruns
assert 0 == running_dagruns # no dag_runs in running state are left
def test_sub_set_subdag(self, dag_maker):
with dag_maker(
'test_sub_set_subdag',
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dr = dag_maker.create_dagrun()
executor = MockExecutor()
sub_dag = dag.partial_subset(
task_ids_or_regex="leave*", include_downstream=False, include_upstream=False
)
job = BackfillJob(dag=sub_dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
job.run()
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
assert State.SUCCESS == ti.state
else:
assert State.NONE == ti.state
def test_backfill_fill_blanks(self, dag_maker):
with dag_maker(
'test_backfill_fill_blanks',
) as dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dr = dag_maker.create_dagrun()
executor = MockExecutor()
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
with pytest.raises(AirflowException, match='Some task instances failed'):
job.run()
dr.refresh_from_db()
assert dr.state == State.FAILED
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
assert ti.state == State.SUCCESS
elif ti.task_id == op2.task_id:
assert ti.state == State.FAILED
elif ti.task_id == op3.task_id:
assert ti.state == State.SKIPPED
elif ti.task_id == op5.task_id:
assert ti.state == State.UPSTREAM_FAILED
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.timetable = cron_timetable('@daily')
start_date = timezone.utcnow()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=start_date, end_date=start_date, executor=executor, donot_pickle=True
)
job.run()
subdag_op_task.pre_execute(context={'execution_date': start_date})
subdag_op_task.execute(context={'execution_date': start_date})
subdag_op_task.post_execute(context={'execution_date': start_date})
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
assert 5 == len(subdag_history)
for sdh in subdag_history:
ti = sdh[3]
assert 'section-1-task-' in ti.task_id
with create_session() as session:
successful_subdag_runs = (
session.query(DagRun)
.filter(DagRun.dag_id == subdag.dag_id)
.filter(DagRun.execution_date == start_date)
.filter(DagRun.state == State.SUCCESS)
.count()
)
assert 1 == successful_subdag_runs
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('clear_subdag_test_dag')
subdag_op_task = dag.get_task('daily_job')
subdag = subdag_op_task.subdag
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
with timeout(seconds=30):
job.run()
ti_subdag = TI(task=dag.get_task('daily_job'), execution_date=DEFAULT_DATE)
ti_subdag.refresh_from_db()
assert ti_subdag.state == State.SUCCESS
ti_irrelevant = TI(task=dag.get_task('daily_job_irrelevant'), execution_date=DEFAULT_DATE)
ti_irrelevant.refresh_from_db()
assert ti_irrelevant.state == State.SUCCESS
ti_downstream = TI(task=dag.get_task('daily_job_downstream'), execution_date=DEFAULT_DATE)
ti_downstream.refresh_from_db()
assert ti_downstream.state == State.SUCCESS
sdag = subdag.partial_subset(
task_ids_or_regex='daily_job_subdag_task', include_downstream=True, include_upstream=False
)
sdag.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, include_parentdag=True)
ti_subdag.refresh_from_db()
assert State.NONE == ti_subdag.state
ti_irrelevant.refresh_from_db()
assert State.SUCCESS == ti_irrelevant.state
ti_downstream.refresh_from_db()
assert State.NONE == ti_downstream.state
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
session = settings.Session()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
dr = DagRun(
dag_id=subdag.dag_id, execution_date=DEFAULT_DATE, run_id="test", run_type=DagRunType.BACKFILL_JOB
)
session.add(dr)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'), run_id=dr.run_id, state=State.REMOVED
)
removed_task_ti.dag_id = subdag.dag_id
dr.task_instances.append(removed_task_ti)
session.commit()
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = (
session.query(TI)
.filter(
TI.dag_id == subdag.dag_id, TI.task_id == task.task_id, TI.execution_date == DEFAULT_DATE
)
.first()
)
assert instance is not None
assert instance.state == State.SUCCESS
removed_task_ti.refresh_from_db()
assert removed_task_ti.state == State.REMOVED
subdag.clear()
dag.clear()
def test_update_counters(self, dag_maker):
with dag_maker(dag_id='test_manage_executor_state', start_date=DEFAULT_DATE) as dag:
task1 = DummyOperator(task_id='dummy', owner='airflow')
dr = dag_maker.create_dagrun()
job = BackfillJob(dag=dag)
session = settings.Session()
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 1
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 1
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 1
assert len(ti_status.to_run) == 0
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for reschedule
# For rescheduled state, tests that reduced_key is not
# used by upping try_number.
ti._try_number = 2
ti.set_state(State.UP_FOR_RESCHEDULE, session)
assert ti.try_number == 3 # see ti.try_number property in taskinstance module
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
# Setting ti._try_number = 0 brings us to ti.try_number==1
# so that the reduced_key access will work fine
ti._try_number = 0
assert ti.try_number == 1 # see ti.try_number property in taskinstance module
session.merge(ti)
session.commit()
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
session.close()
def test_dag_dagrun_infos_between(self, dag_maker):
with dag_maker(
dag_id='dagrun_infos_between', start_date=DEFAULT_DATE, schedule_interval="@hourly"
) as test_dag:
DummyOperator(
task_id='dummy',
owner='airflow',
)
assert [DEFAULT_DATE] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE,
latest=DEFAULT_DATE,
)
]
assert [
DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE,
] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE - datetime.timedelta(hours=3),
latest=DEFAULT_DATE,
)
]
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
session = settings.Session()
tis = (
session.query(TI)
.join(TI.dag_run)
            # both conditions must be passed as separate filter arguments;
            # Python's `and` between SQLAlchemy expressions silently drops the first one
            .filter(TI.dag_id == 'test_start_date_scheduling', TI.task_id == 'dummy')
.order_by(DagRun.execution_date)
.all()
)
queued_times = [ti.queued_dttm for ti in tis]
assert queued_times == sorted(queued_times, reverse=True)
assert all(ti.state == State.SUCCESS for ti in tis)
dag.clear()
session.close()
def test_reset_orphaned_tasks_with_orphans(self, dag_maker):
"""Create dagruns and ensure only ones with correct states are reset."""
prefix = 'backfill_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
tasks = []
with dag_maker(dag_id=prefix) as dag:
for i in range(len(states)):
task_id = f"{prefix}_task_{i}"
task = DummyOperator(task_id=task_id)
tasks.append(task)
session = settings.Session()
job = BackfillJob(dag=dag)
# create dagruns
dr1 = dag_maker.create_dagrun()
dr2 = dag.create_dagrun(run_id='test2', state=State.SUCCESS)
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
assert 2 == job.reset_state_for_orphaned_tasks()
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
assert ti.state is None
else:
assert state == ti.state
# otherwise not
for state, ti in zip(states, dr2_tis):
assert state == ti.state
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
assert state == ti.state
def test_reset_orphaned_tasks_specified_dagrun(self, session, dag_maker):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
task_id = dag_id + '_task'
with dag_maker(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily',
session=session,
) as dag:
DummyOperator(task_id=task_id, dag=dag)
job = BackfillJob(dag=dag)
# make two dagruns, only reset for one
dr1 = dag_maker.create_dagrun(state=State.SUCCESS)
dr2 = dag.create_dagrun(run_id='test2', state=State.RUNNING, session=session)
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.flush()
num_reset_tis = job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
assert 1 == num_reset_tis
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
assert State.SCHEDULED == ti1.state
assert State.NONE == ti2.state
def test_job_id_is_assigned_to_dag_run(self, dag_maker):
dag_id = 'test_job_id_is_assigned_to_dag_run'
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily') as dag:
DummyOperator(task_id="dummy_task", dag=dag)
job = BackfillJob(
dag=dag, executor=MockExecutor(), start_date=datetime.datetime.now() - datetime.timedelta(days=1)
)
job.run()
dr: DagRun = dag.get_last_dagrun()
assert dr.creating_job_id == job.id
def test_backfill_has_job_id(self):
"""Make sure that backfill jobs are assigned job_ids."""
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
assert executor.job_id is not None
|
trader_thread.py
|
import logging
import sys
import threading
import time
from collections import deque
import pandas as pd
from datetime import datetime
# ---
from win10toast import ToastNotifier
import numpy as np
import pickle
import os
import asyncio
from datetime import timedelta, timezone
from typing import Optional
from dotenv import load_dotenv
from operator import itemgetter
load_dotenv()
PWD = os.getenv("PWD")
db_name = PWD + "\\database" + "\\RVNUSDT.db"
sys.path.insert(1, PWD + "\\modules")
from alg_modules.alg_handler import AlgHandler
from plot_modules.candle_plot import CandlePlot
from paper_trade import PaperTrader
DEBUG = __debug__
LOG_FILE_NAME = "log_file_name.log"
format = "%(asctime)s [%(levelname)s]: %(message)s"
logging.basicConfig(
    filename=LOG_FILE_NAME if not DEBUG else None,
    format=format,
    encoding="utf-8",
    level=logging.INFO,
)
if not DEBUG:
    # also echo log records to the console when logging to a file
    logging.getLogger().addHandler(logging.StreamHandler())
from stop_loss import StopLoss
from trade_strategy import TradeStrategy
from wss_thread import WssThread
from api_modules.open_binance_api import OpenBinanceApi
import pytz
tzdata = pytz.timezone('Europe/Moscow')
# ---
class Trader(object):
'''docstring for Trader'''
def __init__(self, ):
super().__init__()
self.is_stopped = None
self._thread = threading.Thread(target=self.between_callback, args=())
# self._thread = threading.Thread(target=asyncio.run, args=())
self._lock = threading.Lock()
def between_callback(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self.thread_function(self))
self.loop.close()
async def thread_function(self, *args, **kwargs):
# ====
def compute_timedelta(dt: datetime):
if dt.tzinfo is None:
dt = dt.astimezone()
now = datetime.now(timezone.utc)
return max((dt - now).total_seconds(), 0)
async def sleep_until(when: datetime, result = None):
"""|coro|
Sleep until a specified time.
If the time supplied is in the past this function will yield instantly.
.. versionadded:: 1.3
Parameters
-----------
when: :class:`datetime.datetime`
The timestamp in which to sleep until. If the datetime is naive then
it is assumed to be local time.
result: Any
If provided is returned to the caller when the coroutine completes.
"""
delta = compute_timedelta(when)
return await asyncio.sleep(delta, result)
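        # Illustrative usage of the two helpers above (the values are hypothetical):
        #     next_minute = datetime.now(timezone.utc).replace(second=0, microsecond=0) + timedelta(minutes=1)
        #     await sleep_until(next_minute)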
# ====
server_time = datetime.fromtimestamp(OpenBinanceApi.server_time()/1000)
local_time = datetime.now()
delay = server_time - local_time
# ====
# notifications
toast = ToastNotifier()
static_notification_settings = dict(
title="Algo traid BOT",
duration = 20,
icon_path = "python.ico",
threaded = 1,
)
notify = lambda msg: toast.show_toast(
msg=msg,
**static_notification_settings,
)
msg="Watch out for notifications from here"
        async def notification(msg):
            # retry until the toast is actually shown; show_toast returns
            # False while a previous notification is still being displayed
            while not notify(msg):
                await asyncio.sleep(20)
# ====
DATA_AWAIT_TIME = 1 # seconds
SERVER_DELAY = 10 # seconds
INTERVAL_SECONDS = 60 # seconds
# request that data from api
w = WssThread(
url='wss://stream.binance.com:9443/ws/rvnusdt@ticker',
maxlen=10,
)
w.start()
STOP_LOSS_ENABLED=True
STOP_LOSS_THRESHOLD=-1.3
DEQUE_MAX_LENGTH = 200
INTERVAL = '1m'
df = OpenBinanceApi.get_df(
pair = 'RVNUSDT',
interval = INTERVAL,
limit = 1000,
)
        # drop the incomplete last row  TODO: add an assert so the last row is not duplicated by the update loop
df = df[:-1]
stop_loss_trade_flag = False
MA_list = (2, 7, 25, 100)
window = deque(maxlen=200)
for i, row in df.iterrows():
window.append(dict(row.squeeze()))
#initial currency resources
p_trdr = PaperTrader(
main_currency_label='RVN',
secondary_currency_label='USD',
main_currency_amount=100,
secondary_currency_amount=0,
fee=0.1,
)
trade_data = pd.DataFrame(
columns = p_trdr.get_df(timestamp=df.iloc[-1]['Date']).columns.values
)
stop_loss = StopLoss(
STOP_LOSS_THRESHOLD=STOP_LOSS_THRESHOLD,
)
# init alg
alg = AlgHandler(
df=pd.DataFrame([]),
MA_list=MA_list,
)
while not self._stopped:
logging.info('===get new data===')
new_df = OpenBinanceApi.get_df(
pair = 'RVNUSDT',
interval = INTERVAL,
limit = 2,
)
dt = datetime.fromtimestamp(int(new_df.Real_Date[-1:])/1000)
server_time = datetime.fromtimestamp(OpenBinanceApi.server_time()/1000)
logging.debug(f'server time: {server_time} {server_time.minute=}, {dt.minute=}')
# extract function?
if server_time.minute == dt.minute:
logging.debug('+++===success===+++')
window.append(dict(new_df[-2:-1].squeeze()))
df_ = pd.DataFrame(window)
# === process data here ===
# display(df_)
for _, row in df_.iterrows():
# window.append(dict(row))
# df__ = pd.DataFrame(window)
alg.update_data(df_)
alg.calculate(val_col='Open', time_col='Date',)
do_trade, cross_type = alg.evaluate()
if STOP_LOSS_ENABLED:
stop_loss_trade_flag, trade_data = stop_loss.stop_loss_alg(
trade_data=trade_data,
p_trdr=p_trdr,
row=row,
)
if do_trade:
trade_data, stop_loss_trade_flag = TradeStrategy.trade_alg(stop_loss_trade_flag, trade_data, p_trdr, row, cross_type)
await notification(f'Trade done: {trade_data}\n {trade_data[-1:]}')
logging.info(f'do trade: {do_trade}')
self.p_trdr = p_trdr
self.alg = alg
# self.df__ = df__
self.df_ = df_
self.window = window
# display(trade_data)
# display(alg.crosses)
# === end of data processing ===
time_to_sleep = dt - delay + timedelta(seconds=SERVER_DELAY) + timedelta(seconds=INTERVAL_SECONDS)
server_delay = dt - server_time
logging.debug(f'server valid time: {server_time}')
logging.debug(f'server delay: {server_delay.total_seconds()}')
logging.debug(f'sleep till: {time_to_sleep}')
await sleep_until(time_to_sleep)
else:
logging.debug('---not valid---')
logging.debug('sleep 1 sec')
await asyncio.sleep(DATA_AWAIT_TIME)
    def get_data(self) -> tuple:
        with self._lock:
            try:
                return self.p_trdr, self.alg, self.df_, self.window
            except AttributeError as err:
                # data is not ready until the first complete iteration
                logging.debug(err)
def start(self):
'''Start the thread'''
logging.info(f"[{self.__class__.__name__}] Opening thread")
self.is_stopped = False
self._thread.start()
    def close(self) -> None:
        '''Close the thread'''
        with self._lock:
            logging.info(f"[{self.__class__.__name__}] Closing thread")
            self.is_stopped = True
            # stop the worker's own event loop from this (different) thread;
            # asyncio.get_event_loop() here would return the caller's loop,
            # not the loop created in between_callback
            if getattr(self, "loop", None) is not None and self.loop.is_running():
                self.loop.call_soon_threadsafe(self.loop.stop)
@property
def _stopped(self):
return self.is_stopped
@property
def is_alive(self):
return self._thread.is_alive()
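# --- illustrative sketch, not part of the original module ---
# Trader above runs an asyncio coroutine inside a dedicated thread by giving
# that thread its own event loop (see between_callback). The helper below
# shows the same pattern in isolation using only the standard library; the
# name run_coroutine_in_thread is made up for illustration.
def run_coroutine_in_thread(coro_factory):
    """Run the coroutine returned by coro_factory in a background thread
    with its own event loop and return the started Thread."""
    def _runner():
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(coro_factory())
        finally:
            loop.close()
    t = threading.Thread(target=_runner, daemon=True)
    t.start()
    return t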
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
w = Trader(
# url='wss://stream.binance.com:9443/ws/rvnusdt@ticker',
# maxlen=10,
)
w.start()
try:
while True:
print(w.get_data())
time.sleep(5)
finally:
# close thread
w.close()
|
filemanager.py
|
import os
import re
import threading
import urllib
try:
import urllib.parse as urlparse
except ImportError: # py2
import urlparse
import xbmc
import xbmcvfs
from contextlib import closing
from lib import cleaner
from lib.libs import mediainfo as info, mediatypes, pykodi, quickjson, utils
from lib.libs.addonsettings import settings
from lib.libs.pykodi import localize as L, log
from lib.libs.webhelper import Getter, GetterError
CANT_CONTACT_PROVIDER = 32034
HTTP_ERROR = 32035
CANT_WRITE_TO_FILE = 32037
REMOTE_CONTROL_REQUIRED = 32039
FILEERROR_LIMIT = 3
PROVIDERERROR_LIMIT = 3
TEMP_DIR = 'special://temp/recycledartwork/'
typemap = {'image/jpeg': 'jpg', 'image/png': 'png', 'image/gif': 'gif'}
# REVIEW: Deleting replaced artwork. If [movie base name]-fanart.jpg exists and AB is
# configured for fanart.jpg, downloading a new artwork will save to the short name but
# leave the long name, and the next scan will pick up the long name.
# ditto scanning 'logo.png' at first and saving new 'clearlogo.png', but clearlogo will be picked
# first by the next scan so that's not such a big deal.
class FileManager(object):
def __init__(self, debug=False, bigcache=False):
self.getter = Getter()
self.getter.session.headers['User-Agent'] = settings.useragent
self.size = 0
self.fileerror_count = 0
self.provider_errors = {}
self.debug = debug
self.alreadycached = None if not bigcache else []
self._build_imagecachebase()
def _build_imagecachebase(self):
result = pykodi.execute_jsonrpc({"jsonrpc": "2.0", "id": 1, "method": "Settings.GetSettings",
"params": {"filter": {"category": "control", "section": "services"}}})
port = 80
username = ''
password = ''
secure = False
server_enabled = True
if result.get('result', {}).get('settings'):
for setting in result['result']['settings']:
if setting['id'] == 'services.webserver' and not setting['value']:
server_enabled = False
break
if setting['id'] == 'services.webserverusername':
username = setting['value']
elif setting['id'] == 'services.webserverport':
port = setting['value']
elif setting['id'] == 'services.webserverpassword':
password = setting['value']
elif setting['id'] == 'services.webserverssl' and setting['value']:
secure = True
username = '{0}:{1}@'.format(username, password) if username and password else ''
else:
server_enabled = False
if server_enabled:
protocol = 'https' if secure else 'http'
self.imagecachebase = '{0}://{1}localhost:{2}/image/'.format(protocol, username, port)
else:
self.imagecachebase = None
log(L(REMOTE_CONTROL_REQUIRED), xbmc.LOGWARNING)
def downloadfor(self, mediaitem, allartwork=True):
if self.fileerror_count >= FILEERROR_LIMIT:
return False, ''
if not info.can_saveartwork(mediaitem):
return False, ''
to_download = get_downloadable_art(mediaitem, allartwork)
if not to_download:
return False, ''
services_hit = False
error = ''
localfiles = get_local_art(mediaitem, allartwork)
for arttype, url in to_download.items():
hostname = urlparse.urlparse(url).netloc
if self.provider_errors.get(hostname, 0) >= PROVIDERERROR_LIMIT:
continue
full_basefilepath = info.build_artwork_basepath(mediaitem, arttype)
if not full_basefilepath:
continue
if self.debug:
mediaitem.downloadedart[arttype] = full_basefilepath + '.ext'
continue
result, err = self.doget(url)
if err:
error = err
self.provider_errors[hostname] = self.provider_errors.get(hostname, 0) + 1
continue
if not result:
# 404 URL dead, wipe it so we can add another one later
mediaitem.downloadedart[arttype] = None
continue
self.size += int(result.headers.get('content-length', 0))
services_hit = True
ext = get_file_extension(result.headers.get('content-type'), url)
if not ext:
log("Can't determine extension for '{0}'\nfor image type '{1}'".format(url, arttype))
continue
full_basefilepath += '.' + ext
if xbmcvfs.exists(full_basefilepath):
if extrafanart_name_used(full_basefilepath, localfiles):
# REVIEW: can this happen in any other circumstance?
full_basefilepath = get_next_filename(full_basefilepath, localfiles)
localfiles.append(full_basefilepath)
if xbmcvfs.exists(full_basefilepath) and settings.recycle_removed:
recyclefile(full_basefilepath)
else:
folder = os.path.dirname(full_basefilepath)
if not xbmcvfs.exists(folder):
xbmcvfs.mkdirs(folder)
# For now this just downloads the whole thing in memory, then saves it to file.
# Maybe chunking it will be better when GIFs are handled
file_ = xbmcvfs.File(full_basefilepath, 'wb')
with closing(file_):
if not file_.write(result.content):
self.fileerror_count += 1
raise FileError(L(CANT_WRITE_TO_FILE).format(full_basefilepath))
self.fileerror_count = 0
mediaitem.downloadedart[arttype] = full_basefilepath
log("downloaded '{0}'\nto image file '{1}'".format(url, full_basefilepath))
return services_hit, error
def doget(self, url, **kwargs):
try:
result = self.getter(url, **kwargs)
if not result and url.startswith('http://'):
# Try https, the browser "that totally shows this image" probably is, even if no redirect
result, err = self.doget('https://' + url[7:])
if err or not result:
result = None
return result, None
except GetterError as ex:
message = L(CANT_CONTACT_PROVIDER) if ex.connection_error \
else L(HTTP_ERROR).format(ex.message) + '\n' + url
return None, message
def remove_deselected_files(self, mediaitem, assignedart=False):
if self.debug:
return
for arttype, newimage in mediaitem.selectedart.iteritems():
if newimage is not None:
continue
if assignedart:
oldimage = mediaitem.art.get(arttype)
else:
oldimage = mediaitem.forcedart.get(arttype)
if not oldimage:
continue
old_url = oldimage['url'] if isinstance(oldimage, dict) else \
oldimage if isinstance(oldimage, basestring) else oldimage[0]['url']
if not old_url or old_url.startswith(pykodi.notimagefiles) \
or old_url in mediaitem.selectedart.values() or not xbmcvfs.exists(old_url):
continue
if settings.recycle_removed:
recyclefile(old_url)
xbmcvfs.delete(old_url)
def set_bigcache(self):
if self.alreadycached is None:
self.alreadycached = []
def cachefor(self, artmap, multiplethreads=False):
if not self.imagecachebase or self.debug:
return 0
urls = [url for url in artmap.values() if url and not url.startswith(('http', 'image'))]
if not urls:
return 0
if self.alreadycached is not None:
if not self.alreadycached:
self.alreadycached = [pykodi.unquoteimage(texture['url']) for texture in quickjson.get_textures()
if not pykodi.unquoteimage(texture['url']).startswith(('http', 'image'))]
alreadycached = self.alreadycached
else:
alreadycached = [pykodi.unquoteimage(texture['url']) for texture in quickjson.get_textures(urls)]
count = [0]
def worker(path):
try:
res, _ = self.doget(self.imagecachebase + urllib.quote(pykodi.quoteimage(path), ''), stream=True)
if res:
res.iter_content(chunk_size=1024)
res.close()
count[0] += 1
except GetterError:
pass
threads = []
for path in urls:
if path in alreadycached:
continue
if multiplethreads:
t = threading.Thread(target=worker, args=(path,))
threads.append(t)
t.start()
else:
worker(path)
for t in threads:
t.join()
return count[0]
def extrafanart_name_used(path, localfiles):
return utils.parent_dir(path) == 'extrafanart' and path in localfiles
def get_file_extension(contenttype, request_url, re_search=re.compile(r'\.\w*$')):
if contenttype in typemap:
return typemap[contenttype]
if re.search(re_search, request_url):
return request_url.rsplit('.', 1)[1]
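# Illustrative behaviour of get_file_extension above (the URLs are hypothetical):
#   get_file_extension('image/png', 'http://example.com/art')    -> 'png'  (from the content type)
#   get_file_extension(None, 'http://example.com/fanart.jpg')    -> 'jpg'  (from the URL suffix)
#   get_file_extension(None, 'http://example.com/art')           -> None   (no extension known)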
def get_next_filename(full_basefilepath, localfiles):
nextname = full_basefilepath
char_int = 97
while nextname in localfiles:
name, ext = os.path.splitext(full_basefilepath)
nextname = name + chr(char_int) + ext
char_int += 1
return nextname
def get_downloadable_art(mediaitem, allartwork):
if allartwork:
downloadable = dict(mediaitem.art)
downloadable.update(mediaitem.selectedart)
else:
downloadable = dict(mediaitem.selectedart)
for arttype in list(downloadable):
if not downloadable[arttype] or not downloadable[arttype].startswith('http') or \
not mediatypes.downloadartwork(mediaitem.mediatype, arttype):
del downloadable[arttype]
return downloadable
def get_local_art(mediaitem, allartwork):
local = []
if allartwork:
arts = mediaitem.art if settings.clean_imageurls else \
cleaner.clean_artwork(mediaitem) # library URLs not cleaned, but can still help here
for url in arts.values():
if url and not url.startswith('http'):
local.append(url)
for url in mediaitem.selectedart.values():
if url and not url.startswith('http'):
local.append(url)
return local
def recyclefile(filename):
firstdir = utils.parent_dir(filename)
directory = TEMP_DIR
pathsep = utils.get_pathsep(directory)
if firstdir in ('extrafanart', 'extrathumbs'):
directory += utils.parent_dir(os.path.dirname(filename)) + pathsep
directory += firstdir
if not xbmcvfs.exists(directory):
xbmcvfs.mkdirs(directory)
recycled_filename = directory + pathsep + os.path.basename(filename)
if not xbmcvfs.copy(filename, recycled_filename):
raise FileError(L(CANT_WRITE_TO_FILE).format(recycled_filename))
class FileError(Exception):
def __init__(self, message, cause=None):
super(FileError, self).__init__()
self.cause = cause
self.message = message
|
example_threads.py
|
from farmbot import Farmbot, FarmbotToken
import threading
# PYTHON MULTITHREAD EXAMPLE.
# ==========================================================
# The main thread has a blocking loop that waits for user
# input. The W/A/S/D keys are used to move FarmBot. Commands
# are entered in a queue that are processed in a background
# thread so as to not be blocked when waiting for keyboard
# input.
# ==========================================================
class MyHandler:
def __init__(self, bot):
# Store "W", "A", "S", "D" in a queue
self.queue = []
# Maintain a flag that lets us know if the bot is
# ready for more commands.
self.busy = True
self.bot = bot
def add_job(self, direction):
d = direction.capitalize()
if d in ["W", "A", "S", "D"]:
self.queue.append(d)
self.bot.read_status()
def try_next_job(self):
if (len(self.queue) > 0) and (not self.busy):
command = self.queue.pop(0)
print("sending " + command)
self.busy = True
if command == "W":
return self.bot.move_relative(10, 0, 0)
if command == "A":
return self.bot.move_relative(0, -10, 0)
if command == "S":
return self.bot.move_relative(-10, 0, 0)
if command == "D":
return self.bot.move_relative(0, 10, 0)
def on_connect(self, bot, mqtt_client):
self.bot.read_status()
pass
def on_change(self, bot, state):
is_busy = state['informational_settings']['busy']
if is_busy != self.busy:
if is_busy:
print("Device is busy")
else:
print("Device is idle")
self.busy = is_busy
self.try_next_job()
def on_log(self, _bot, log):
print("LOG: " + log['message'])
def on_response(self, _bot, _response):
pass
def on_error(self, _bot, response):
print("ERROR: " + response.id)
print("Reason(s) for failure: " + str(response.errors))
if __name__ == '__main__':
raw_token = FarmbotToken.download_token(
"[email protected]", "pass", "https://my.farm.bot")
fb = Farmbot(raw_token)
handler = MyHandler(fb)
threading.Thread(target=fb.connect, name="foo", args=[handler]).start()
print("ENTER A DIRECTION VIA WASD:")
print(" ^")
print(" W")
print(" < A S >")
print(" D")
print(" v")
    while True:
direction = input("> ")
handler.add_job(direction)
handler.try_next_job()
|
process.py
|
import os
import sys
import pty
import threading
import subprocess
import time
from select import select
class ProHelper(object):
"""
A helper class for keeping track of long-running processes. It launches the process
in background, receives output from it, sends input to it. It also can emulate an
interactive terminalm in case your process does something different when launched
in a terminal (like showing a progress bar).
"""
process = None
def __init__(self, command, shell = False, use_terminal = True, output_callback = None, cwd = None, popen_kwargs = None):
self.command = command
self.shell = shell
self.cwd = cwd
self.use_terminal = use_terminal
self.output_callback = output_callback
self.popen_kwargs = popen_kwargs if popen_kwargs else {}
if self.output_callback == 'print':
self.output_callback = self.print_output
def run(self):
"""
Launches the process (in the background), either with an emulated
terminal or not.
"""
if self.use_terminal:
self.terminal, self.s = pty.openpty()
self.process = subprocess.Popen(self.command, stdin=self.s, stdout=self.s, stderr=self.s, shell=self.shell, cwd=self.cwd, close_fds=True, **self.popen_kwargs)
else:
            raise NotImplementedError
def output_available(self, timeout=0.1):
"""
Returns True if there is output from the command that
hasn't yet been processed.
"""
if self.use_terminal:
return select([self.terminal], [], [], timeout)[0]
else:
            raise NotImplementedError
def read(self, size, timeout=None):
"""
Reads output from the process, limited by size (with an optional
timeout).
"""
if self.use_terminal:
s = select([self.terminal], [], [], timeout)[0]
if s:
return os.read(s[0], size)
else:
            raise NotImplementedError
def readall_or_until(self, timeout=0, readsize=1, until=None):
"""
Reads all available output from the process, or until a character is encountered.
Timeout is 0 by default, meaning that function will return immediately.
"""
output = []
while self.output_available():
data = self.read(readsize, timeout)
output.append(data)
if data == until:
break
return "".join(output)
def readall(self, timeout=0, readsize=1):
"""
        Reads all available output from the process. Timeout is 0 by default,
        meaning that the function will return immediately (unless output is a constant
        stream of data, in which case it's best to use ``readall_or_until``).
"""
output = []
while self.output_available():
data = self.read(readsize, timeout)
output.append(data)
return "".join(output)
def write(self, data):
"""
Sends input to the process.
"""
if self.use_terminal:
return os.write(self.terminal, data)
else:
            raise NotImplementedError
    def run_in_foreground(self, delay=0.5):
        """
        This method starts the process, blocks until it's finished
        and returns its status code. Don't use this function if you want to send
        input to the process, kill it at a whim, or do something else
        that is not done by the output callback (unless you're running it in a
        separate thread - which might be unwarranted).
        """
        self.run()
        while self.is_ongoing():
            if callable(self.output_callback):
                self.relay_output()
            time.sleep(delay)
        return self.get_return_code()
def run_in_background(self, delay=0.5, thread_name=None):
"""
Runs the ``run_in_foreground`` method in a separate thread.
Can set the thread name, can also pass the ``delay`` argument.
"""
self.thread = threading.Thread(target=self.run_in_foreground, kwargs={"delay":delay})
self.thread.daemon = True
self.thread.start()
def poll(self, **read_kw):
"""
This function polls the process for output (and relays it into the callback),
as well as polls whether the process is finished.
"""
if self.process:
self.process.poll()
if callable(self.output_callback):
self.relay_output(**read_kw)
def relay_output(self, read_type="readall", read_kw={}):
"""
This method checks if there's output waiting to be read; if there is,
it reads all of it and sends it to the output callback.
"""
if self.output_available():
read = getattr(self, read_type)
output = read(timeout=0, **read_kw)
self.output_callback(output)
def print_output(self, data):
"""
The default function used for processing output from the command.
For now, it simply sends the data to ``sys.stdout``.
"""
sys.stdout.write(data)
sys.stdout.flush()
def kill_process(self):
"""
Terminates the process if it's running.
"""
if not self.is_ongoing():
return False
return self.process.terminate()
def get_return_code(self):
"""
Returns the process' return code - if the process is not yet finished, return None.
"""
return self.process.returncode
def is_ongoing(self):
"""
Returns whether the process is still ongoing. If the process wasn't started yet,
return False.
"""
if not self.process:
return False
self.process.poll()
return self.process.returncode is None
def dump_info(self):
self.poll()
ongoing = self.is_ongoing()
info = {"command":self.command, "is_ongoing":ongoing, "return_code":None,
"cwd":self.cwd, "shell":self.shell, "use_terminal":self.use_terminal,
"output_callback":str(self.output_callback),
"popen_kwargs":self.popen_kwargs }
if not ongoing:
info["return_code"] = self.get_return_code()
return info
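# --- illustrative usage sketch, not part of the original module ---
# Driving ProHelper with the API defined above: run a command under the
# emulated terminal, stream its output through the built-in 'print' callback
# and poll until it exits. The command and function name are examples only.
def _example_prohelper_usage():
    ph = ProHelper(["echo", "hello"], use_terminal=True, output_callback='print')
    ph.run()
    while ph.is_ongoing():
        ph.poll()  # relays any pending output to the callback
        time.sleep(0.1)
    return ph.get_return_code()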
import unittest
class TestProHelper(unittest.TestCase):
"""Tests the ProHelper"""
def test_constructor(self):
ph = ProHelper("python")
self.assertIsNotNone(ph)
def test_run_foreground(self):
ph = ProHelper(["echo", "hello"], use_terminal=True)
ph.run_in_foreground()
assert(ph.output_available())
output = ph.readall(timeout=5, readsize=1024)
assert(output.strip() == "hello")
assert(ph.get_return_code() == 0)
def test_exit_code_1(self):
ph = ProHelper("false", use_terminal=True)
ph.run_in_foreground()
assert(ph.get_return_code() == 1)
def test_dump_info(self):
ph = ProHelper("false", use_terminal=True)
ph.run_in_foreground()
info = ph.dump_info()
assert(info["return_code"] == 1)
assert(info["is_ongoing"] == False)
def test_launch_kill(self):
ph = ProHelper("python", use_terminal=True, output_callback=lambda *a, **k: True)
ph.run()
ph.poll()
assert(ph.is_ongoing())
ph.kill_process()
ph.poll()
assert(not ph.is_ongoing())
@unittest.skip("No idea how to implement this properly without making test running even more long")
def test_input(self):
ph = ProHelper(["python", "-c", "raw_input('hello')"], use_terminal=True)
ph.run()
ph.poll()
assert(ph.is_ongoing())
ph.write('\n')
output = ph.readall(timeout=5, readsize=1024)
ph.poll()
ph.read(1)
print(repr(output))
assert(not ph.is_ongoing())
assert(output.strip() == "hello")
if __name__ == '__main__':
import sys
if sys.argv[-1] != "play":
unittest.main()
|
example_userInfoCrawler.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
注意:
1. 服务端上需自行配置代理
2. start_num end_num 为需手动填写
"""
from luogu import *
from openpyxl import Workbook
from openpyxl import load_workbook
from bs4 import BeautifulSoup
from urllib import request, error
from queue import Queue
import time
import queue
import os
import ssl
import json
import threading
import linecache
# Required settings
start_num = 1
end_num = 1000
# Luogu website URLs
defaultURL = "https://www.luogu.org"
userURL = "https://www.luogu.org/space/show?uid="
# Changing the values below is not recommended
title = ['id', '名字', '头像', '总提交数', 'AC数', '贡献', '活跃', '积分', '用户类型', '注册时间']
wbName = 'luogu2.xlsx'
wsName = '1'
downloadPath = 'download/'
imagePath = downloadPath + 'img/'
taskPath = downloadPath + 'task/'
def download_img(url, userName):
""" 下载图片到download/文件夹下
"""
loc = imagePath + userName + '.png'
if os.path.exists(loc):
return
try:
        # download the image
request.urlretrieve(url, filename=loc)
except:
print("\n无法下载文件")
def crawler(taskque, que):
""" get task from taskque
"""
try:
# Init browser
browser = LuoguBrowser()
browser.openURL(defaultURL)
except Exception as e:
print("无法创建")
print(e)
return
while True:
try:
i = taskque.get(block=True, timeout=1)
except queue.Empty:
            print('No more tasks')
            print('Please wait for the remaining threads to finish')
return
try:
# Get messageURL
messageURL = userURL + str(i)
## View Web
browser.openURL(messageURL)
## getData
html = browser.getData()
html = LuoguBrowser.ungzip(html)
soup = BeautifulSoup(html, 'html.parser')
# print(soup)
board = soup.find(
'ul', {'class': 'am-list am-list-static lg-summary-list'})
items = board.find_all("li")
# 0
userName = soup.find('span', {'name': 'username'}).get_text()
avatar = items[0].find('img')['src']
# 1
allPost = items[1].find_all('span', {'class': 'lg-bignum-num'})
Num = allPost[0].get_text()
ACNum = allPost[1].get_text()
# 2
Acts = items[4].find('span', {'class': 'lg-right'}).get_text()
Acts = Acts.split('/')
contribute = Acts[0]
active = Acts[1]
integral = Acts[2]
# 3
Type = items[5].find('span', {'class': 'lg-right'}).get_text()
# 4
registeredTime = items[6].find('span', {
'class': 'lg-right'
}).get_text()
# make t
t = [
i, userName, avatar, Num, ACNum, contribute, active, integral,
Type, registeredTime
]
            # download the avatar image
download_img(avatar, str(i))
# finish
taskque.task_done()
que.put(t)
except AttributeError:
que.put([i, '无此人'])
            print('id not found:', i)
except Exception as e:
print(e)
def saveThread(que, sheet):
while True:
try:
t = que.get(block=True, timeout=60)
if t[1] != '-1':
sheet.append(t)
path = taskPath + str(t[0])
if os.path.exists(path):
os.remove(path)
except queue.Empty:
return
que.task_done()
def getLine(num):
""" 返回是否为true
"""
if os.path.exists(taskPath + str(num)):
return True
return False
def getTaskThread(que, filePath):
""" 创建任务列队
"""
# thread = threading.current_thread()
tgroup = os.listdir(taskPath)
for item in tgroup:
try:
que.put(int(item))
except ValueError:
print(item)
    print('Number of remaining tasks:', que.qsize())
def init():
    print('Initializing...')
    if not os.path.exists(downloadPath):
        print('Creating download folder...')
        os.makedirs(downloadPath)
        print('done...')
    if not os.path.exists(taskPath):
        print('Creating task files...')
        os.makedirs(taskPath)
        # only used on the first run of the script
        taskMaker(start=start_num, end=end_num)
        print('done...')
    if not os.path.exists(imagePath):
        print('Creating image folder...')
        os.makedirs(imagePath)
        print('done...')
    if not os.path.exists(wbName):
        print('Creating Excel workbook...')
        wb = Workbook()
        wb.create_sheet(title=wsName)
        wb.save(wbName)
        print('done...')
    print('Initialization complete')
def taskMaker(start=1, end=100):
""" 初始化任务表
"""
if not os.path.exists(taskPath):
os.makedirs(taskPath)
for i in range(start, end):
f = open(taskPath + str(i), mode='w')
f.close()
return
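# Note (added for clarity): taskMaker materializes the work list as empty
# files named start..end-1 under download/task/; getTaskThread later turns
# the remaining file names back into queue entries, and saveThread deletes a
# file once the corresponding row has been written to the workbook.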
def backgroundThread(saveQue, taskQue):
while True:
sz = saveQue.qsize()
        print('Pending save queue size:', sz)
        sz = taskQue.qsize()
        print('Remaining tasks:', sz)
time.sleep(30)
def main():
    # MARK -- reference: https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error
ssl._create_default_https_context = ssl._create_unverified_context
# init
init()
# load data
print('loading...')
wb = load_workbook(wbName)
sheet = wb[wsName]
sheet.append(title)
# thread
saveQue = Queue()
taskQue = Queue()
thread = []
    for i in range(0, 9):  # crawler worker threads
t = threading.Thread(
target=crawler, name=str(i), args=(taskQue, saveQue))
thread.append(t)
st = threading.Thread(
target=saveThread, name='saveThread', args=(saveQue, sheet))
gt = threading.Thread(
target=getTaskThread, name='getTaskThread', args=(taskQue, taskPath))
bg = threading.Thread(
target=backgroundThread,
name='backgroundThread',
args=(saveQue, taskQue))
print('loading...')
try:
print('start!')
gt.start()
gt.join()
for t in thread:
t.start()
st.start()
bg.start()
st.join()
except:
print("线程错误")
finally:
wb.save(wbName)
if __name__ == '__main__':
main()
|
crypto_util_test.py
|
"""Tests for acme.crypto_util."""
import itertools
import socket
import threading
import time
import unittest
import six
from six.moves import socketserver # pylint: disable=import-error
from acme import errors
from acme import jose
from acme import test_util
class SSLSocketAndProbeSNITest(unittest.TestCase):
"""Tests for acme.crypto_util.SSLSocket/probe_sni."""
def setUp(self):
self.cert = test_util.load_comparable_cert('cert.pem')
key = test_util.load_pyopenssl_private_key('rsa512_key.pem')
# pylint: disable=protected-access
certs = {b'foo': (key, self.cert.wrapped)}
from acme.crypto_util import SSLSocket
class _TestServer(socketserver.TCPServer):
# pylint: disable=too-few-public-methods
# six.moves.* | pylint: disable=attribute-defined-outside-init,no-init
def server_bind(self): # pylint: disable=missing-docstring
self.socket = SSLSocket(socket.socket(), certs=certs)
socketserver.TCPServer.server_bind(self)
self.server = _TestServer(('', 0), socketserver.BaseRequestHandler)
self.port = self.server.socket.getsockname()[1]
self.server_thread = threading.Thread(
# pylint: disable=no-member
target=self.server.handle_request)
self.server_thread.start()
time.sleep(1) # TODO: avoid race conditions in other way
def tearDown(self):
self.server_thread.join()
def _probe(self, name):
from acme.crypto_util import probe_sni
return jose.ComparableX509(probe_sni(
name, host='127.0.0.1', port=self.port))
def test_probe_ok(self):
self.assertEqual(self.cert, self._probe(b'foo'))
def test_probe_not_recognized_name(self):
self.assertRaises(errors.Error, self._probe, b'bar')
    # TODO: py33/py34 tox hangs forever on do_handshake in second probe
#def probe_connection_error(self):
# self._probe(b'foo')
# #time.sleep(1) # TODO: avoid race conditions in other way
# self.assertRaises(errors.Error, self._probe, b'bar')
class PyOpenSSLCertOrReqSANTest(unittest.TestCase):
"""Test for acme.crypto_util._pyopenssl_cert_or_req_san."""
@classmethod
def _call(cls, loader, name):
# pylint: disable=protected-access
from acme.crypto_util import _pyopenssl_cert_or_req_san
return _pyopenssl_cert_or_req_san(loader(name))
@classmethod
def _get_idn_names(cls):
"""Returns expected names from '{cert,csr}-idnsans.pem'."""
chars = [six.unichr(i) for i in itertools.chain(range(0x3c3, 0x400),
range(0x641, 0x6fc),
range(0x1820, 0x1877))]
return [''.join(chars[i: i + 45]) + '.invalid'
for i in range(0, len(chars), 45)]
def _call_cert(self, name):
return self._call(test_util.load_cert, name)
def _call_csr(self, name):
return self._call(test_util.load_csr, name)
def test_cert_no_sans(self):
self.assertEqual(self._call_cert('cert.pem'), [])
def test_cert_two_sans(self):
self.assertEqual(self._call_cert('cert-san.pem'),
['example.com', 'www.example.com'])
def test_cert_hundred_sans(self):
self.assertEqual(self._call_cert('cert-100sans.pem'),
['example{0}.com'.format(i) for i in range(1, 101)])
def test_cert_idn_sans(self):
self.assertEqual(self._call_cert('cert-idnsans.pem'),
self._get_idn_names())
def test_csr_no_sans(self):
self.assertEqual(self._call_csr('csr-nosans.pem'), [])
def test_csr_one_san(self):
self.assertEqual(self._call_csr('csr.pem'), ['example.com'])
def test_csr_two_sans(self):
self.assertEqual(self._call_csr('csr-san.pem'),
['example.com', 'www.example.com'])
def test_csr_six_sans(self):
self.assertEqual(self._call_csr('csr-6sans.pem'),
['example.com', 'example.org', 'example.net',
'example.info', 'subdomain.example.com',
'other.subdomain.example.com'])
def test_csr_hundred_sans(self):
self.assertEqual(self._call_csr('csr-100sans.pem'),
['example{0}.com'.format(i) for i in range(1, 101)])
def test_csr_idn_sans(self):
self.assertEqual(self._call_csr('csr-idnsans.pem'),
self._get_idn_names())
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
collaborative.py
|
from __future__ import annotations
import logging
from dataclasses import dataclass
from threading import Thread, Lock, Event
from typing import Dict, Optional, Iterator
import numpy as np
import torch
from pydantic import BaseModel, StrictBool, StrictFloat, confloat, conint
from hivemind.averaging.training import TrainingAverager
from hivemind.dht import DHT
from hivemind.dht.crypto import RSASignatureValidator
from hivemind.dht.schema import BytesWithPublicKey, SchemaValidator
from hivemind.optim.base import DecentralizedOptimizerBase
from hivemind.optim.performance_ema import PerformanceEMA
from hivemind.utils import Endpoint, get_dht_time, get_logger
logger = get_logger(__name__)
LRSchedulerBase = getattr(torch.optim.lr_scheduler, "_LRScheduler", None)
@dataclass(frozen=False)
class CollaborationState:
optimizer_step: int
samples_accumulated: int
target_batch_size: int
num_peers: int
num_clients: int
eta_next_step: float
next_fetch_time: float
@property
def ready_for_step(self):
return self.samples_accumulated >= self.target_batch_size or get_dht_time() >= self.eta_next_step
def register_step(self, local_step: int):
self.optimizer_step = max(local_step, self.optimizer_step)
self.samples_accumulated = 0
self.eta_next_step = float("inf")
class TrainingState(BaseModel):
endpoint: Endpoint
step: conint(ge=0, strict=True)
samples_accumulated: conint(ge=0, strict=True)
samples_per_second: confloat(ge=0.0, strict=True)
time: StrictFloat
client_mode: StrictBool
class TrainingProgressSchema(BaseModel):
progress: Dict[BytesWithPublicKey, Optional[TrainingState]]
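# --- illustrative sketch, not part of the original module ---
# TrainingState is the pydantic record each peer publishes to the DHT under
# TrainingProgressSchema. A minimal example of building one, assuming
# Endpoint is a plain "host:port" string (the value below is made up):
def _example_training_state() -> TrainingState:
    return TrainingState(
        endpoint="127.0.0.1:1337",
        step=0,
        samples_accumulated=0,
        samples_per_second=0.0,
        time=get_dht_time(),
        client_mode=False,
    )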
class CollaborativeOptimizer(DecentralizedOptimizerBase):
"""
An optimizer that performs model updates after collaboratively accumulating a target (large) batch size across peers
    These optimizers use the DHT to track how much progress the collaboration has made towards the target batch size.
    Once enough samples have been accumulated, the optimizers will compute a weighted average of their statistics.
:note: This optimizer behaves unlike regular pytorch optimizers in two ways:
* calling .step will periodically zero-out gradients w.r.t. model parameters after each step
* it may take multiple .step calls without updating model parameters, waiting for peers to accumulate enough samples
:param opt: a standard pytorch optimizer, preferably a large-batch one such as LAMB, LARS, etc.
:param dht: a running hivemind.DHT daemon connected to other peers
:param prefix: a common prefix for all metadata stored by CollaborativeOptimizer in the DHT
:param target_batch_size: perform optimizer step after all peers collectively accumulate this many samples
:param batch_size_per_step: before each call to .step, user should accumulate gradients over this many samples
:param min_refresh_period: wait for at least this many seconds before fetching new collaboration state
:param max_refresh_period: wait for at most this many seconds before fetching new collaboration state
:param default_refresh_period: if no peers are detected, attempt to fetch collaboration state this often (seconds)
:param expected_drift_peers: assume that this many new peers can join between steps
:param expected_drift_rate: assumes that this fraction of current collaboration can join/leave between steps
:note: The expected collaboration drift parameters are used to adjust the frequency with which this optimizer will
refresh the collaboration-wide statistics (to avoid missing the moment when to run the next step)
:param bandwidth: peer's network bandwidth for the purpose of load balancing (recommended: internet speed in mbps)
:param step_tolerance: a peer can temporarily be delayed by this many steps without being deemed out of sync
:param performance_ema_alpha: smoothing value used to estimate this peer's performance (training samples per second)
:param averaging_expiration: peer's requests for averaging will be valid for this many seconds
:param metadata_expiration: peer's metadata (e.g. samples processed) is stored onto DHT for this many seconds
:param averaging_timeout: if an averaging step hangs for this long, it will be cancelled.
:param scheduler: if specified, use this scheduler to update optimizer learning rate
:param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
:param accumulate_grads_on: if specified, accumulate gradients on this device. By default, this will use the same
device as model parameters. One can specify a different device (e.g. 'cpu' vs 'cuda') to save device memory at
        the cost of extra time per step. If reuse_grad_buffers is True, this parameter has no effect.
:param client_mode: if True, runs training without incoming connections, in a firewall-compatible mode
:param kwargs: additional parameters forwarded to DecentralizedAverager
:note: If you are using CollaborativeOptimizer with lr_scheduler, it is recommended to pass this scheduler
explicitly into this class. Otherwise, scheduler may not be synchronized between peers.
"""
def __init__(
self,
opt: torch.optim.Optimizer,
*,
dht: DHT,
prefix: str,
target_batch_size: int,
batch_size_per_step: Optional[int] = None,
scheduler: Optional[LRSchedulerBase] = None,
min_refresh_period: float = 0.5,
max_refresh_period: float = 30,
default_refresh_period: float = 3,
expected_drift_peers: float = 3,
expected_drift_rate: float = 0.2,
performance_ema_alpha: float = 0.1,
metadata_expiration: float = 60.0,
averaging_timeout: Optional[float] = None,
step_tolerance: int = 1,
reuse_grad_buffers: bool = False,
accumulate_grads_on: Optional[torch.device] = None,
client_mode: bool = False,
verbose: bool = False,
**kwargs,
):
super().__init__(opt, dht)
signature_validator = RSASignatureValidator()
self._local_public_key = signature_validator.local_public_key
dht.add_validators([SchemaValidator(TrainingProgressSchema, prefix=prefix), signature_validator])
if reuse_grad_buffers and accumulate_grads_on is not None:
logger.warning("Setting 'accumulate_grads_on' has no effect if reuse_grad_buffers=True")
self.prefix, self.scheduler = prefix, scheduler
self.target_batch_size, self.batch_size_per_step = target_batch_size, batch_size_per_step
self.min_refresh_period, self.max_refresh_period, self.default_refresh_period = (
min_refresh_period,
max_refresh_period,
default_refresh_period,
)
self.expected_drift_peers, self.expected_drift_rate = expected_drift_peers, expected_drift_rate
self.averaging_timeout, self.metadata_expiration = averaging_timeout, metadata_expiration
self._grads, self.reuse_grad_buffers, self.accumulate_grads_on = None, reuse_grad_buffers, accumulate_grads_on
self.client_mode, self.step_tolerance = client_mode, step_tolerance
self.status_loglevel = logging.INFO if verbose else logging.DEBUG
self.averager = self._make_averager(**kwargs)
self.training_progress_key = f"{self.prefix}_progress"
self.local_samples_accumulated = 0 # a number of local samples accumulated since last optimizer update
self.local_steps_accumulated = 0 # a number of calls to step() since last optimizer update
self.performance_ema = PerformanceEMA(alpha=performance_ema_alpha)
self.last_step_time = None
self.collaboration_state = self.fetch_collaboration_state()
self.lock_collaboration_state, self.collaboration_state_updated = Lock(), Event()
self.lock_local_progress, self.should_report_progress = Lock(), Event()
self.progress_reporter = Thread(target=self.report_training_progress, daemon=True, name=f"{self}.reporter")
self.progress_reporter.start()
self.collaboration_state_updater = Thread(
target=self.check_collaboration_state_periodically, daemon=True, name=f"{self}.collaboration_state_updater"
)
self.collaboration_state_updater.start()
def _make_averager(self, **kwargs):
return TrainingAverager(
self.opt,
dht=self.dht,
average_parameters=True,
average_gradients=True,
prefix=f"{self.prefix}_averaging",
allreduce_timeout=self.averaging_timeout,
client_mode=self.client_mode,
**kwargs,
)
@property
def local_step(self) -> int:
return self.averager.local_step
@property
def is_synchronized(self) -> bool:
return self.local_step >= self.collaboration_state.optimizer_step - self.step_tolerance
def is_alive(self) -> bool:
return self.averager.is_alive()
def load_state_from_peers(self, **kwargs):
"""Attempt to fetch the newest collaboration state from other peers"""
with self.lock_collaboration_state:
self.averager.load_state_from_peers(**kwargs)
self.local_samples_accumulated = self.local_steps_accumulated = 0
self.reset_accumulated_grads_()
self.update_scheduler()
def step(self, batch_size: Optional[int] = None, **kwargs):
"""
Report accumulating gradients w.r.t. batch_size additional samples, optionally update model parameters
:param batch_size: optional override for batch_size_per_step from init
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if self.batch_size_per_step is None:
if batch_size is None:
raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step")
logger.log(self.status_loglevel, f"Setting default batch_size_per_step to {batch_size}")
self.batch_size_per_step = batch_size
batch_size = batch_size if batch_size is not None else self.batch_size_per_step
if not self.is_synchronized:
logger.log(self.status_loglevel, "Peer is out of sync.")
self.load_state_from_peers()
return
if self.last_step_time is not None and get_dht_time() - self.last_step_time > self.metadata_expiration:
logger.warning(
f"Training step took {get_dht_time() - self.last_step_time}, "
f"but metadata expired in {self.metadata_expiration} s."
)
self.accumulate_grads_(batch_size)
with self.lock_local_progress:
self.local_samples_accumulated += batch_size
self.local_steps_accumulated += 1
self.performance_ema.update(num_processed=batch_size)
self.should_report_progress.set()
if not self.collaboration_state.ready_for_step:
return
logger.log(self.status_loglevel, f"Beginning global optimizer step {self.collaboration_state.optimizer_step}")
self.collaboration_state = self.fetch_collaboration_state()
self.collaboration_state_updated.set()
if not self.is_synchronized:
self.load_state_from_peers()
return
with self.performance_ema.pause(), self.lock_collaboration_state:
# divide accumulators by local steps to recover the true average grad w.r.t. local_samples_accumulated
self.apply_accumulated_grads_(scale_by=1.0 / self.local_steps_accumulated)
current_step, group_info = self.averager.local_step, None
if self.collaboration_state.num_peers > 1:
mean_samples_per_worker = self.target_batch_size / self.collaboration_state.num_peers
weight = self.local_samples_accumulated / mean_samples_per_worker
try:
group_info = self.averager.step(weight=weight, timeout=self.averaging_timeout, **kwargs)
if group_info:
logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers")
except BaseException as e:
logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}.")
else:
logger.log(
self.status_loglevel,
f"Skipped averaging: collaboration consists of " f"{self.collaboration_state.num_peers} peer(s).",
)
self.opt.step()
self.reset_accumulated_grads_()
self.local_samples_accumulated = self.local_steps_accumulated = 0
self.collaboration_state.register_step(current_step + 1)
self.averager.local_step = current_step + 1
self.collaboration_state_updated.set()
self.update_scheduler()
logger.log(self.status_loglevel, f"Optimizer step: done!")
return group_info
def step_aux(self, **kwargs):
"""
Find and assist other peers in averaging without sending local gradients.
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if not self.collaboration_state.ready_for_step:
return
logger.log(self.status_loglevel, f"Beginning global optimizer step {self.collaboration_state.optimizer_step}")
self.collaboration_state = self.fetch_collaboration_state()
self.collaboration_state_updated.set()
with self.lock_collaboration_state:
# divide accumulators by local steps to recover the true average grad w.r.t. local_samples_accumulated
current_step, group_info = self.averager.local_step, None
try:
group_info = self.averager.step(timeout=self.averaging_timeout, **kwargs)
if group_info:
logger.log(self.status_loglevel, f"Averaged tensors successfully with {len(group_info)} peers")
except BaseException as e:
logger.log(self.status_loglevel, f"Skipped averaging: averaging round failed with {repr(e)}.")
self.collaboration_state.register_step(current_step + 1)
self.averager.local_step = current_step + 1
self.collaboration_state_updated.set()
logger.log(self.status_loglevel, f"Optimizer step: done!")
return group_info
def _grad_buffers(self) -> Iterator[torch.Tensor]:
"""pytorch-internal gradient buffers"""
for param_group in self.opt.param_groups:
for param in param_group["params"]:
if param.grad is None:
yield torch.zeros_like(param)
else:
yield param.grad
@torch.no_grad()
def accumulated_grads(self) -> Iterator[torch.Tensor]:
"""local gradient accumulators"""
if self.reuse_grad_buffers:
yield from self._grad_buffers()
elif self._grads is None:
with torch.no_grad():
self._grads = [
torch.zeros_like(grad, device=self.accumulate_grads_on) for grad in self._grad_buffers()
]
yield from self._grads
@torch.no_grad()
def accumulate_grads_(self, batch_size: int):
"""add current gradients to grad accumulators (if any)"""
if self.reuse_grad_buffers:
return # user is responsible for accumulating gradients in .grad buffers
alpha = float(batch_size) / self.batch_size_per_step
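        # weight each micro-batch by its share of batch_size_per_step so the accumulator
        # approximates an average gradient over one full local step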
for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()):
grad_acc.add_(grad_buf.to(grad_acc.device), alpha=alpha)
@torch.no_grad()
def apply_accumulated_grads_(self, scale_by: Optional[float] = None):
if self.reuse_grad_buffers:
return
for grad_buf, grad_acc in zip(self._grad_buffers(), self.accumulated_grads()):
grad_buf[...] = grad_acc.to(grad_buf.device)
if scale_by is not None:
grad_buf.mul_(scale_by)
@torch.no_grad()
def reset_accumulated_grads_(self):
if self.reuse_grad_buffers:
self.opt.zero_grad()
else:
for grad_buf in self.accumulated_grads():
grad_buf.zero_()
def report_training_progress(self):
"""Periodically publish metadata and the current number of samples accumulated towards the next step"""
while self.is_alive():
self.should_report_progress.wait()
self.should_report_progress.clear()
with self.lock_local_progress:
current_time = get_dht_time()
local_state_info = TrainingState(
endpoint=self.averager.endpoint,
step=self.local_step,
samples_accumulated=self.local_samples_accumulated,
samples_per_second=self.performance_ema.samples_per_second,
time=current_time,
client_mode=self.averager.client_mode,
)
self.dht.store(
key=self.training_progress_key,
subkey=self._local_public_key,
value=local_state_info.dict(),
expiration_time=current_time + self.metadata_expiration,
return_future=True,
)
def check_collaboration_state_periodically(self):
"""
Periodically check the training progress from all peers. Trigger update after target_batch_size total samples
"""
while self.is_alive():
time_to_next_update = max(0.0, self.collaboration_state.next_fetch_time - get_dht_time())
if self.collaboration_state_updated.wait(time_to_next_update):
self.collaboration_state_updated.clear()
continue # if state was updated externally, reset timer
with self.lock_collaboration_state:
self.collaboration_state = self.fetch_collaboration_state()
def fetch_collaboration_state(self) -> CollaborationState:
"""Read performance statistics reported by peers, estimate progress towards next batch"""
response, _expiration = self.dht.get(self.training_progress_key, latest=True) or (None, -float("inf"))
current_time = get_dht_time()
if not isinstance(response, dict) or len(response) == 0:
logger.log(self.status_loglevel, f"Found no active peers: {response}")
local_eta_next_step = (
max(0, self.target_batch_size - self.local_steps_accumulated) / self.performance_ema.samples_per_second
)
return CollaborationState(
self.local_step,
self.local_samples_accumulated,
self.target_batch_size,
num_peers=0,
num_clients=0,
eta_next_step=current_time + local_eta_next_step,
next_fetch_time=current_time + self.default_refresh_period,
)
valid_peer_states = [
TrainingState.parse_obj(peer_state.value)
for peer_state in response.values()
if peer_state.value is not None
]
num_peers = len(valid_peer_states)
num_clients = sum(state.client_mode for state in valid_peer_states)
global_optimizer_step = self.local_step
for state in valid_peer_states:
if not state.client_mode:
global_optimizer_step = max(global_optimizer_step, state.step)
total_samples_accumulated = estimated_current_samples = total_samples_per_second = 0
for state in valid_peer_states:
total_samples_per_second += state.samples_per_second
if state.step == global_optimizer_step:
total_samples_accumulated += state.samples_accumulated
estimated_current_samples += (
state.samples_accumulated + max(0, current_time - state.time) * state.samples_per_second
)
# note: we deliberately count only valid peers for samples_accumulated, but all peers for performance;
# the rationale behind this is that outdated peers will synchronize and begin contributing shortly.
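        # Poll the DHT more often when the next step is close or the collaboration is small
        # (so new peers can quickly change the estimate); clamp the interval to
        # [min_refresh_period, max_refresh_period].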
estimated_samples_remaining = self.target_batch_size - estimated_current_samples
estimated_time_to_next_step = max(0, estimated_samples_remaining) / total_samples_per_second
expected_max_peers = max(num_peers + self.expected_drift_peers, num_peers * (1 + self.expected_drift_rate))
time_to_next_fetch = float(
np.clip(
a=estimated_time_to_next_step * num_peers / expected_max_peers,
a_min=self.min_refresh_period,
a_max=self.max_refresh_period,
)
)
logger.log(
self.status_loglevel,
f"Collaboration accumulated {total_samples_accumulated} samples from "
f"{num_peers} peers; ETA {estimated_time_to_next_step:.2f} seconds "
f"(refresh in {time_to_next_fetch:.2f}s.)",
)
return CollaborationState(
global_optimizer_step,
total_samples_accumulated,
target_batch_size=self.target_batch_size,
num_peers=num_peers,
num_clients=num_clients,
eta_next_step=current_time + estimated_time_to_next_step,
next_fetch_time=current_time + time_to_next_fetch,
)
def zero_grad(self, *args, **kwargs):
if self.reuse_grad_buffers:
raise ValueError(
f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never "
f"call zero_grad manually. Gradients will be refreshed internally."
)
return self.opt.zero_grad(*args, **kwargs)
def update_scheduler(self):
if self.scheduler:
while self.scheduler._step_count < self.local_step:
self.scheduler.step()
def shutdown(self):
logger.debug("Shutting down averager...")
self.averager.shutdown()
logger.debug("Sending goodbye to peers...")
self.dht.store(
self.training_progress_key,
subkey=self._local_public_key,
value=None,
expiration_time=get_dht_time() + self.metadata_expiration,
)
logger.debug(f"{self.__class__.__name__} is shut down.")
def __del__(self):
self.shutdown()
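# Illustrative sketch (not part of the original module): how a training loop might drive the
# collaborative optimizer defined above. The constructor arguments shown here are assumptions
# inferred from the attributes used in this file (`opt`, `dht`, `target_batch_size`,
# `batch_size_per_step`); `model`, `loader`, `make_dht()` and `compute_loss()` are hypothetical
# placeholders, and the class name should be adjusted to the one defined above.
#
#   collab_opt = CollaborativeOptimizer(
#       opt=torch.optim.SGD(model.parameters(), lr=1e-3),
#       dht=make_dht(),
#       target_batch_size=4096,
#       batch_size_per_step=32,
#   )
#   for batch in loader:
#       loss = compute_loss(model, batch)
#       loss.backward()
#       collab_opt.step(batch_size=len(batch))  # reports progress; may trigger a global step
#       collab_opt.zero_grad()                  # valid only when reuse_grad_buffers is False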
|
util.py
|
import os
import re
import sys
import time
import json
from urllib.request import Request, urlopen
from urllib.parse import urlparse
from decimal import Decimal
from urllib.parse import quote
from datetime import datetime
from subprocess import TimeoutExpired, Popen, PIPE, DEVNULL, CompletedProcess, CalledProcessError
from multiprocessing import Process
from config import (
ANSI,
IS_TTY,
TERM_WIDTH,
REPO_DIR,
OUTPUT_DIR,
SOURCES_DIR,
ARCHIVE_DIR,
OUTPUT_PERMISSIONS,
TIMEOUT,
SHOW_PROGRESS,
CHECK_SSL_VALIDITY,
WGET_USER_AGENT,
CURL_BINARY,
WGET_BINARY,
CHROME_BINARY,
GIT_BINARY,
YOUTUBEDL_BINARY,
FETCH_TITLE,
FETCH_FAVICON,
FETCH_WGET,
FETCH_WARC,
FETCH_PDF,
FETCH_SCREENSHOT,
FETCH_DOM,
FETCH_GIT,
FETCH_MEDIA,
SUBMIT_ARCHIVE_DOT_ORG,
)
# URL helpers: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url) # uniq base url used to dedupe links
short_ts = lambda ts: ts.split('.')[0]
URL_REGEX = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))[^<\""]+'
HTML_TITLE_REGEX = '<title>(.[^<>]+)'
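# Examples of the URL helpers above (illustrative comments only, not executed):
#   without_scheme('https://example.com/a?x=1')   -> 'example.com/a?x=1'
#   base_url('https://example.com/a?x=1')         -> 'example.com/a?x=1'
#   basename('https://example.com/dir/page.html') -> 'page.html'
#   extension('https://example.com/file.PDF')     -> 'pdf'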
def check_dependencies():
"""Check that all necessary dependencies are installed, and have valid versions"""
python_vers = float('{}.{}'.format(sys.version_info.major, sys.version_info.minor))
if python_vers < 3.5:
print('{}[X] Python version is not new enough: {} (>3.5 is required){}'.format(ANSI['red'], python_vers, ANSI['reset']))
print(' See https://github.com/pirate/ArchiveBox#troubleshooting for help upgrading your Python installation.')
raise SystemExit(1)
if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
if run(['which', CURL_BINARY], stdout=DEVNULL).returncode or run([CURL_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: curl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CURL_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_WGET or FETCH_WARC:
if run(['which', WGET_BINARY], stdout=DEVNULL).returncode or run([WGET_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: wget{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(WGET_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_PDF or FETCH_SCREENSHOT or FETCH_DOM:
if run(['which', CHROME_BINARY], stdout=DEVNULL).returncode:
print('{}[X] Missing dependency: {}{}'.format(ANSI['red'], CHROME_BINARY, ANSI['reset']))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
# parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
try:
result = run([CHROME_BINARY, '--version'], stdout=PIPE)
version_str = result.stdout.decode('utf-8')
version_lines = re.sub("(Google Chrome|Chromium) (\\d+?)\\.(\\d+?)\\.(\\d+?).*?$", "\\2", version_str).split('\n')
version = [l for l in version_lines if l.isdigit()][-1]
if int(version) < 59:
print(version_lines)
print('{red}[X] Chrome version must be 59 or greater for headless PDF, screenshot, and DOM saving{reset}'.format(**ANSI))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
except (IndexError, TypeError, OSError):
print('{red}[X] Failed to parse Chrome version, is it installed properly?{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(CHROME_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_GIT:
if run(['which', GIT_BINARY], stdout=DEVNULL).returncode or run([GIT_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: git{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(GIT_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
if FETCH_MEDIA:
if run(['which', YOUTUBEDL_BINARY], stdout=DEVNULL).returncode or run([YOUTUBEDL_BINARY, '--version'], stdout=DEVNULL).returncode:
print('{red}[X] Missing dependency: youtube-dl{reset}'.format(**ANSI))
print(' Run ./setup.sh, then confirm it was installed with: {} --version'.format(YOUTUBEDL_BINARY))
print(' See https://github.com/pirate/ArchiveBox for help.')
raise SystemExit(1)
def chmod_file(path, cwd='.', permissions=OUTPUT_PERMISSIONS, timeout=30):
"""chmod -R <permissions> <cwd>/<path>"""
if not os.path.exists(os.path.join(cwd, path)):
raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))
chmod_result = run(['chmod', '-R', permissions, path], cwd=cwd, stdout=DEVNULL, stderr=PIPE, timeout=timeout)
if chmod_result.returncode == 1:
print(' ', chmod_result.stderr.decode())
raise Exception('Failed to chmod {}/{}'.format(cwd, path))
def progress(seconds=TIMEOUT, prefix=''):
"""Show a (subprocess-controlled) progress bar with a <seconds> timeout,
returns end() function to instantly finish the progress
"""
if not SHOW_PROGRESS:
return lambda: None
def progress_bar(seconds, prefix):
"""show timer in the form of progress bar, with percentage and seconds remaining"""
chunk = '█' if sys.stdout.encoding == 'UTF-8' else '#'
chunks = TERM_WIDTH - len(prefix) - 20 # number of progress chunks to show (aka max bar width)
try:
for s in range(seconds * chunks):
progress = s / chunks / seconds * 100
bar_width = round(progress/(100/chunks))
# ████████████████████ 0.9% (1/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['green'],
(chunk * bar_width).ljust(chunks),
ANSI['reset'],
round(progress, 1),
round(s/chunks),
seconds,
))
sys.stdout.flush()
time.sleep(1 / chunks)
# ██████████████████████████████████ 100.0% (60/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)\n'.format(
prefix,
ANSI['red'],
chunk * chunks,
ANSI['reset'],
100.0,
seconds,
seconds,
))
sys.stdout.flush()
except KeyboardInterrupt:
print()
pass
p = Process(target=progress_bar, args=(seconds, prefix))
p.start()
def end():
"""immediately finish progress and clear the progressbar line"""
# protect from double termination
#if p is None or not hasattr(p, 'kill'):
# return
nonlocal p
if p is not None:
p.terminate()
p = None
sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH), ANSI['reset'])) # clear whole terminal line
sys.stdout.flush()
return end
def pretty_path(path):
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
return path.replace(REPO_DIR + '/', '')
def save_source(raw_text):
if not os.path.exists(SOURCES_DIR):
os.makedirs(SOURCES_DIR)
ts = str(datetime.now().timestamp()).split('.', 1)[0]
source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format('stdin', ts))
with open(source_path, 'w', encoding='utf-8') as f:
f.write(raw_text)
return source_path
def fetch_page_content(url, timeout=TIMEOUT):
req = Request(url, headers={'User-Agent': WGET_USER_AGENT})
if CHECK_SSL_VALIDITY:
resp = urlopen(req, timeout=timeout)
else:
import ssl
insecure = ssl._create_unverified_context()
resp = urlopen(req, timeout=timeout, context=insecure)
encoding = resp.headers.get_content_charset() or 'utf-8'
return resp.read().decode(encoding)
def download_url(url, timeout=TIMEOUT):
"""download a given url's content into downloads/domain.txt"""
if not os.path.exists(SOURCES_DIR):
os.makedirs(SOURCES_DIR)
ts = str(datetime.now().timestamp()).split('.', 1)[0]
source_path = os.path.join(SOURCES_DIR, '{}-{}.txt'.format(domain(url), ts))
print('[*] [{}] Downloading {} > {}'.format(
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
url,
pretty_path(source_path),
))
end = progress(TIMEOUT, prefix=' ')
try:
downloaded_xml = fetch_page_content(url, timeout=timeout)
end()
except Exception as e:
end()
print('[!] Failed to download {}\n'.format(url))
print(' ', e)
raise SystemExit(1)
with open(source_path, 'w', encoding='utf-8') as f:
f.write(downloaded_xml)
return source_path
def fetch_page_title(url, timeout=10, progress=SHOW_PROGRESS):
"""Attempt to guess a page's title by downloading the html"""
if not FETCH_TITLE:
return None
try:
if progress:
sys.stdout.write('.')
sys.stdout.flush()
html = fetch_page_content(url, timeout=timeout)
match = re.search(HTML_TITLE_REGEX, html)
return match.group(1).strip() if match else None
except Exception as err:
# print('[!] Failed to fetch title because of {}: {}'.format(
# err.__class__.__name__,
# err,
# ))
return None
def str_between(string, start, end=None):
"""(<abc>12345</def>, <abc>, </def>) -> 12345"""
content = string.split(start, 1)[-1]
if end is not None:
content = content.rsplit(end, 1)[0]
return content
def get_link_type(link):
"""Certain types of links need to be handled specially, this figures out when that's the case"""
if link['base_url'].endswith('.pdf'):
return 'PDF'
    elif link['base_url'].rsplit('.', 1)[-1] in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
return 'image'
elif 'wikipedia.org' in link['domain']:
return 'wiki'
elif 'youtube.com' in link['domain']:
return 'youtube'
elif 'soundcloud.com' in link['domain']:
return 'soundcloud'
elif 'youku.com' in link['domain']:
return 'youku'
elif 'vimeo.com' in link['domain']:
return 'vimeo'
return None
def merge_links(a, b):
"""deterministially merge two links, favoring longer field values over shorter,
and "cleaner" values over worse ones.
"""
longer = lambda key: (a[key] if len(a[key]) > len(b[key]) else b[key]) if (a[key] and b[key]) else (a[key] or b[key])
earlier = lambda key: a[key] if a[key] < b[key] else b[key]
url = longer('url')
longest_title = longer('title')
cleanest_title = a['title'] if '://' not in (a['title'] or '') else b['title']
link = {
'timestamp': earlier('timestamp'),
'url': url,
'domain': domain(url),
'base_url': base_url(url),
'tags': longer('tags'),
'title': longest_title if '://' not in (longest_title or '') else cleanest_title,
'sources': list(set(a.get('sources', []) + b.get('sources', []))),
}
link['type'] = get_link_type(link)
return link
def find_link(folder, links):
"""for a given archive folder, find the corresponding link object in links"""
url = parse_url(folder)
if url:
for link in links:
if (link['base_url'] in url) or (url in link['url']):
return link
timestamp = folder.split('.')[0]
for link in links:
if link['timestamp'].startswith(timestamp):
if link['domain'] in os.listdir(os.path.join(ARCHIVE_DIR, folder)):
                return link  # note: matching on folder contents like this is a heuristic and not reliable in general
if link['domain'] in parse_url(folder):
return link
return None
def parse_url(folder):
"""for a given archive folder, figure out what url it's for"""
link_json = os.path.join(ARCHIVE_DIR, folder, 'index.json')
if os.path.exists(link_json):
with open(link_json, 'r') as f:
try:
link_json = f.read().strip()
if link_json:
link = json.loads(link_json)
return link['base_url']
except ValueError:
print('File contains invalid JSON: {}!'.format(link_json))
archive_org_txt = os.path.join(ARCHIVE_DIR, folder, 'archive.org.txt')
if os.path.exists(archive_org_txt):
with open(archive_org_txt, 'r') as f:
original_link = f.read().strip().split('/http', 1)[-1]
with_scheme = 'http{}'.format(original_link)
return with_scheme
return ''
def manually_merge_folders(source, target):
"""prompt for user input to resolve a conflict between two archive folders"""
if not IS_TTY:
return
fname = lambda path: path.split('/')[-1]
print(' {} and {} have conflicting files, which do you want to keep?'.format(fname(source), fname(target)))
print(' - [enter]: do nothing (keep both)')
print(' - a: prefer files from {}'.format(source))
print(' - b: prefer files from {}'.format(target))
print(' - q: quit and resolve the conflict manually')
try:
answer = input('> ').strip().lower()
except KeyboardInterrupt:
answer = 'q'
assert answer in ('', 'a', 'b', 'q'), 'Invalid choice.'
if answer == 'q':
print('\nJust run ArchiveBox again to pick up where you left off.')
raise SystemExit(0)
elif answer == '':
return
files_in_source = set(os.listdir(source))
files_in_target = set(os.listdir(target))
for file in files_in_source:
if file in files_in_target:
to_delete = target if answer == 'a' else source
run(['rm', '-Rf', os.path.join(to_delete, file)])
run(['mv', os.path.join(source, file), os.path.join(target, file)])
if not set(os.listdir(source)):
run(['rm', '-Rf', source])
def fix_folder_path(archive_path, link_folder, link):
"""given a folder, merge it to the canonical 'correct' path for the given link object"""
source = os.path.join(archive_path, link_folder)
target = os.path.join(archive_path, link['timestamp'])
url_in_folder = parse_url(source)
if not (url_in_folder in link['base_url']
or link['base_url'] in url_in_folder):
raise ValueError('The link does not match the url for this folder.')
if not os.path.exists(target):
# target doesn't exist so nothing needs merging, simply move A to B
run(['mv', source, target])
else:
# target folder exists, check for conflicting files and attempt manual merge
files_in_source = set(os.listdir(source))
files_in_target = set(os.listdir(target))
conflicting_files = files_in_source & files_in_target
if not conflicting_files:
for file in files_in_source:
run(['mv', os.path.join(source, file), os.path.join(target, file)])
if os.path.exists(source):
files_in_source = set(os.listdir(source))
if files_in_source:
manually_merge_folders(source, target)
else:
run(['rm', '-R', source])
def migrate_data():
# migrate old folder to new OUTPUT folder
old_dir = os.path.join(REPO_DIR, 'html')
if os.path.exists(old_dir):
print('[!] WARNING: Moved old output folder "html" to new location: {}'.format(OUTPUT_DIR))
run(['mv', old_dir, OUTPUT_DIR], timeout=10)
def cleanup_archive(archive_path, links):
"""move any incorrectly named folders to their canonical locations"""
# for each folder that exists, see if we can match it up with a known good link
# if we can, then merge the two folders (TODO: if not, move it to lost & found)
unmatched = []
bad_folders = []
if not os.path.exists(archive_path):
return
for folder in os.listdir(archive_path):
try:
files = os.listdir(os.path.join(archive_path, folder))
except NotADirectoryError:
continue
if files:
link = find_link(folder, links)
if link is None:
unmatched.append(folder)
continue
if folder != link['timestamp']:
bad_folders.append((folder, link))
else:
# delete empty folders
run(['rm', '-R', os.path.join(archive_path, folder)])
if bad_folders and IS_TTY and input('[!] Cleanup archive? y/[n]: ') == 'y':
print('[!] Fixing {} improperly named folders in archive...'.format(len(bad_folders)))
for folder, link in bad_folders:
fix_folder_path(archive_path, folder, link)
elif bad_folders:
print('[!] Warning! {} folders need to be merged, fix by running ArchiveBox.'.format(len(bad_folders)))
if unmatched:
print('[!] Warning! {} unrecognized folders in html/archive/'.format(len(unmatched)))
print(' '+ '\n '.join(unmatched))
def wget_output_path(link, look_in=None):
"""calculate the path to the wgetted .html file, since wget may
adjust some paths to be different than the base_url path.
See docs on wget --adjust-extension (-E)
"""
# if we have it stored, always prefer the actual output path to computed one
if link.get('latest', {}).get('wget'):
return link['latest']['wget']
urlencode = lambda s: quote(s, encoding='utf-8', errors='replace')
if link['type'] in ('PDF', 'image'):
return urlencode(link['base_url'])
    # Since the wget algorithm for -E (appending .html) is incredibly complex,
# instead of trying to emulate it here, we just look in the output folder
# to see what html file wget actually created as the output
wget_folder = link['base_url'].rsplit('/', 1)[0].split('/')
    if look_in is None:
        look_in = os.path.join(ARCHIVE_DIR, link['timestamp'], *wget_folder)
if look_in and os.path.exists(look_in):
html_files = [
f for f in os.listdir(look_in)
if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", f, re.I | re.M)
]
if html_files:
return urlencode(os.path.join(*wget_folder, html_files[0]))
return None
# If finding the actual output file didn't work, fall back to the buggy
# implementation of the wget .html appending algorithm
# split_url = link['url'].split('#', 1)
# query = ('%3F' + link['url'].split('?', 1)[-1]) if '?' in link['url'] else ''
# if re.search(".+\\.[Hh][Tt][Mm][Ll]?$", split_url[0], re.I | re.M):
# # already ends in .html
# return urlencode(link['base_url'])
# else:
# # .html needs to be appended
# without_scheme = split_url[0].split('://', 1)[-1].split('?', 1)[0]
# if without_scheme.endswith('/'):
# if query:
# return urlencode('#'.join([without_scheme + 'index.html' + query + '.html', *split_url[1:]]))
# return urlencode('#'.join([without_scheme + 'index.html', *split_url[1:]]))
# else:
# if query:
# return urlencode('#'.join([without_scheme + '/index.html' + query + '.html', *split_url[1:]]))
# elif '/' in without_scheme:
# return urlencode('#'.join([without_scheme + '.html', *split_url[1:]]))
# return urlencode(link['base_url'] + '/index.html')
def derived_link_info(link):
"""extend link info with the archive urls and other derived data"""
link_info = {
**link,
'date': datetime.fromtimestamp(Decimal(link['timestamp'])).strftime('%Y-%m-%d %H:%M'),
'google_favicon_url': 'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
'favicon_url': 'archive/{timestamp}/favicon.ico'.format(**link),
'files_url': 'archive/{timestamp}/index.html'.format(**link),
'archive_url': 'archive/{}/{}'.format(link['timestamp'], wget_output_path(link) or 'index.html'),
'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link),
'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link),
'dom_link': 'archive/{timestamp}/output.html'.format(**link),
'archive_org_url': 'https://web.archive.org/web/{base_url}'.format(**link),
'title': link['title'] or link['url'],
}
# PDF and images are handled slightly differently
# wget, screenshot, & pdf urls all point to the same file
if link['type'] in ('PDF', 'image'):
link_info.update({
'archive_url': 'archive/{timestamp}/{base_url}'.format(**link),
'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link),
'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link),
'dom_link': 'archive/{timestamp}/{base_url}'.format(**link),
'title': link['title'] or basename(link['url']),
})
return link_info
def run(*popenargs, input=None, capture_output=False, timeout=None, check=False, **kwargs):
"""Patched of subprocess.run to fix blocking io making timeout=innefective"""
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE
if capture_output:
if ('stdout' in kwargs) or ('stderr' in kwargs):
raise ValueError('stdout and stderr arguments may not be used '
'with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
with Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
process.kill()
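            # give the killed child a short window to flush its pipes so this
            # second communicate() call does not block forever on unread output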
try:
stdout, stderr = process.communicate(input, timeout=2)
except:
pass
raise TimeoutExpired(popenargs[0][0], timeout)
except BaseException as err:
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr)
def check_link_structure(link):
assert isinstance(link, dict)
assert isinstance(link.get('url'), str)
assert len(link['url']) > 2
def check_links_structure(links):
assert isinstance(links, list)
if links:
check_link_structure(links[0])
|
cluster.py
|
from collections import namedtuple
import copy
import functools
import heapq
import itertools
import logging
import queue
import random
import threading
# data types
Proposal = namedtuple('Proposal', ['caller', 'client_id', 'input'])
Ballot = namedtuple('Ballot', ['n', 'leader'])
# message types
Accepted = namedtuple('Accepted', ['slot', 'ballot_num'])
Accept = namedtuple('Accept', ['slot', 'ballot_num', 'proposal'])
Decision = namedtuple('Decision', ['slot', 'proposal'])
Invoked = namedtuple('Invoked', ['client_id', 'output'])
Invoke = namedtuple('Invoke', ['caller', 'client_id', 'input_value'])
Join = namedtuple('Join', [])
Active = namedtuple('Active', [])
Prepare = namedtuple('Prepare', ['ballot_num'])
Promise = namedtuple('Promise', ['ballot_num', 'accepted_proposals'])
Propose = namedtuple('Propose', ['slot', 'proposal'])
Welcome = namedtuple('Welcome', ['state', 'slot', 'decisions'])
Decided = namedtuple('Decided', ['slot'])
Preempted = namedtuple('Preempted', ['slot', 'preempted_by'])
Adopted = namedtuple('Adopted', ['ballot_num', 'accepted_proposals'])
Accepting = namedtuple('Accepting', ['leader'])
# constants - these times should really be in terms of average round-trip time
JOIN_RETRANSMIT = 0.7
CATCHUP_INTERVAL = 0.6
ACCEPT_RETRANSMIT = 1.0
PREPARE_RETRANSMIT = 1.0
INVOKE_RETRANSMIT = 0.5
LEADER_TIMEOUT = 1.0
NULL_BALLOT = Ballot(-1, -1) # sorts before all real ballots
NOOP_PROPOSAL = Proposal(None, None, None) # no-op to fill otherwise empty slots
class Node(object):
unique_ids = itertools.count()
def __init__(self, network, address):
self.network = network
self.address = address or 'N%d' % next(self.unique_ids)
self.logger = SimTimeLogger(logging.getLogger(self.address), {'network': self.network})
self.logger.info('starting')
self.roles = []
self.send = functools.partial(self.network.send, self)
def register(self, roles):
self.roles.append(roles)
def unregister(self, roles):
self.roles.remove(roles)
def receive(self, sender, message):
handler_name = 'do_%s' % type(message).__name__
for comp in self.roles[:]:
if not hasattr(comp, handler_name):
continue
comp.logger.debug("received %s from %s", message, sender)
fn = getattr(comp, handler_name)
fn(sender=sender, **message._asdict())
class Timer(object):
def __init__(self, expires, address, callback):
self.expires = expires
self.address = address
self.callback = callback
self.cancelled = False
    def __lt__(self, other):
        return self.expires < other.expires
def cancel(self):
self.cancelled = True
class Network(object):
PROP_DELAY = 0.03
PROP_JITTER = 0.02
DROP_PROB = 0.05
def __init__(self, seed):
self.nodes = {}
self.rnd = random.Random(seed)
self.timers = []
self.now = 1000.0
def new_node(self, address=None):
node = Node(self, address=address)
self.nodes[node.address] = node
return node
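    # Discrete-event simulation loop: repeatedly pop the earliest timer, advance simulated
    # time to its expiry, and fire its callback unless it was cancelled or its node has left.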
def run(self):
while self.timers:
next_timer = self.timers[0]
if next_timer.expires > self.now:
self.now = next_timer.expires
heapq.heappop(self.timers)
if next_timer.cancelled:
continue
if not next_timer.address or next_timer.address in self.nodes:
next_timer.callback()
def stop(self):
self.timers = []
def set_timer(self, address, seconds, callback):
timer = Timer(self.now + seconds, address, callback)
heapq.heappush(self.timers, timer)
return timer
def send(self, sender, destinations, message):
sender.logger.debug("sending %s to %s", message, destinations)
# avoid aliasing by making a closure containing distinct deep copy of message for each dest
def sendto(dest, message):
if dest == sender.address:
# reliably deliver local messages with no delay
self.set_timer(sender.address, 0, lambda: sender.receive(sender.address, message))
elif self.rnd.uniform(0, 1.0) > self.DROP_PROB:
delay = self.PROP_DELAY + self.rnd.uniform(-self.PROP_JITTER, self.PROP_JITTER)
self.set_timer(dest, delay, functools.partial(self.nodes[dest].receive,
sender.address, message))
for dest in (d for d in destinations if d in self.nodes):
sendto(dest, copy.deepcopy(message))
class SimTimeLogger(logging.LoggerAdapter):
def process(self, msg, kwargs):
return "T=%.3f %s" % (self.extra['network'].now, msg), kwargs
def getChild(self, name):
return self.__class__(self.logger.getChild(name),
{'network': self.extra['network']})
class Role(object):
def __init__(self, node):
self.node = node
self.node.register(self)
self.running = True
self.logger = node.logger.getChild(type(self).__name__)
def set_timer(self, seconds, callback):
return self.node.network.set_timer(self.node.address, seconds,
lambda: self.running and callback())
def stop(self):
self.running = False
self.node.unregister(self)
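# The Acceptor is the protocol's memory: it tracks the highest ballot it has promised and the
# proposals it has accepted per slot, answering Prepare with a Promise and Accept with Accepted.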
class Acceptor(Role):
def __init__(self, node):
super(Acceptor, self).__init__(node)
self.ballot_num = NULL_BALLOT
self.accepted_proposals = {} # {slot: (ballot_num, proposal)}
def do_Prepare(self, sender, ballot_num):
if ballot_num > self.ballot_num:
self.ballot_num = ballot_num
# we've heard from a scout, so it might be the next leader
self.node.send([self.node.address], Accepting(leader=sender))
self.node.send([sender], Promise(ballot_num=self.ballot_num, accepted_proposals=self.accepted_proposals))
def do_Accept(self, sender, ballot_num, slot, proposal):
if ballot_num >= self.ballot_num:
self.ballot_num = ballot_num
acc = self.accepted_proposals
if slot not in acc or acc[slot][0] < ballot_num:
acc[slot] = (ballot_num, proposal)
self.node.send([sender], Accepted(
slot=slot, ballot_num=self.ballot_num))
class Replica(Role):
def __init__(self, node, execute_fn, state, slot, decisions, peers):
super(Replica, self).__init__(node)
self.execute_fn = execute_fn
self.state = state
self.slot = slot
self.decisions = decisions
self.peers = peers
self.proposals = {}
        # next slot number for a proposal (may be ahead of self.slot, the next slot to commit)
self.next_slot = slot
self.latest_leader = None
self.latest_leader_timeout = None
# making proposals
def do_Invoke(self, sender, caller, client_id, input_value):
proposal = Proposal(caller, client_id, input_value)
slot = next((s for s, p in list(self.proposals.items()) if p == proposal), None)
# propose, or re-propose if this proposal already has a slot
self.propose(proposal, slot)
def propose(self, proposal, slot=None):
"""Send (or resend, if slot is specified) a proposal to the leader"""
if not slot:
slot, self.next_slot = self.next_slot, self.next_slot + 1
self.proposals[slot] = proposal
# find a leader we think is working - either the latest we know of, or
# ourselves (which may trigger a scout to make us the leader)
leader = self.latest_leader or self.node.address
self.logger.info("proposing %s at slot %d to leader %s" % (proposal, slot, leader))
self.node.send([leader], Propose(slot=slot, proposal=proposal))
# handling decided proposals
def do_Decision(self, sender, slot, proposal):
assert not self.decisions.get(self.slot, None), \
"next slot to commit is already decided"
if slot in self.decisions:
assert self.decisions[slot] == proposal, \
"slot %d already decided with %r!" % (slot, self.decisions[slot])
return
self.decisions[slot] = proposal
self.next_slot = max(self.next_slot, slot + 1)
# re-propose our proposal in a new slot if it lost its slot and wasn't a no-op
our_proposal = self.proposals.get(slot)
if our_proposal is not None and our_proposal != proposal and our_proposal.caller:
self.propose(our_proposal)
# execute any pending, decided proposals
while True:
commit_proposal = self.decisions.get(self.slot)
if not commit_proposal:
break # not decided yet
commit_slot, self.slot = self.slot, self.slot + 1
self.commit(commit_slot, commit_proposal)
def commit(self, slot, proposal):
"""Actually commit a proposal that is decided and in sequence"""
decided_proposals = [p for s, p in list(self.decisions.items()) if s < slot]
if proposal in decided_proposals:
self.logger.info("not committing duplicate proposal %r at slot %d", proposal, slot)
return # duplicate
self.logger.info("committing %r at slot %d" % (proposal, slot))
if proposal.caller is not None:
# perform a client operation
self.state, output = self.execute_fn(self.state, proposal.input)
self.node.send([proposal.caller], Invoked(client_id=proposal.client_id, output=output))
# tracking the leader
def do_Adopted(self, sender, ballot_num, accepted_proposals):
self.latest_leader = self.node.address
self.leader_alive()
def do_Accepting(self, sender, leader):
self.latest_leader = leader
self.leader_alive()
def do_Active(self, sender):
if sender != self.latest_leader:
return
self.leader_alive()
def leader_alive(self):
if self.latest_leader_timeout:
self.latest_leader_timeout.cancel()
def reset_leader():
idx = self.peers.index(self.latest_leader)
self.latest_leader = self.peers[(idx + 1) % len(self.peers)]
self.logger.debug("leader timed out; tring the next one, %s", self.latest_leader)
self.latest_leader_timeout = self.set_timer(LEADER_TIMEOUT, reset_leader)
# adding new cluster members
def do_Join(self, sender):
if sender in self.peers:
self.node.send([sender], Welcome(
state=self.state, slot=self.slot, decisions=self.decisions))
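# A Commander drives the second (Accept) phase of Paxos for a single slot: it retransmits
# Accept messages until a quorum of acceptors responds for its ballot, then broadcasts the
# Decision (or reports Preempted if a higher ballot shows up).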
class Commander(Role):
def __init__(self, node, ballot_num, slot, proposal, peers):
super(Commander, self).__init__(node)
self.ballot_num = ballot_num
self.slot = slot
self.proposal = proposal
self.acceptors = set([])
self.peers = peers
        self.quorum = len(peers) // 2 + 1
def start(self):
self.node.send(set(self.peers) - self.acceptors, Accept(
slot=self.slot, ballot_num=self.ballot_num, proposal=self.proposal))
self.set_timer(ACCEPT_RETRANSMIT, self.start)
def finished(self, ballot_num, preempted):
if preempted:
self.node.send([self.node.address], Preempted(slot=self.slot, preempted_by=ballot_num))
else:
self.node.send([self.node.address], Decided(slot=self.slot))
self.stop()
def do_Accepted(self, sender, slot, ballot_num):
if slot != self.slot:
return
if ballot_num == self.ballot_num:
self.acceptors.add(sender)
if len(self.acceptors) < self.quorum:
return
self.node.send(self.peers, Decision(slot=self.slot, proposal=self.proposal))
self.finished(ballot_num, False)
else:
self.finished(ballot_num, True)
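# A Scout drives the first (Prepare) phase: it retransmits Prepare messages until a quorum of
# acceptors promises its ballot, merging their previously accepted proposals so the new leader
# can re-propose them.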
class Scout(Role):
def __init__(self, node, ballot_num, peers):
super(Scout, self).__init__(node)
self.ballot_num = ballot_num
self.accepted_proposals = {}
self.acceptors = set([])
self.peers = peers
        self.quorum = len(peers) // 2 + 1
self.retransmit_timer = None
def start(self):
self.logger.info("scout starting")
self.send_prepare()
def send_prepare(self):
self.node.send(self.peers, Prepare(ballot_num=self.ballot_num))
self.retransmit_timer = self.set_timer(PREPARE_RETRANSMIT, self.send_prepare)
def update_accepted(self, accepted_proposals):
acc = self.accepted_proposals
for slot, (ballot_num, proposal) in list(accepted_proposals.items()):
if slot not in acc or acc[slot][0] < ballot_num:
acc[slot] = (ballot_num, proposal)
def do_Promise(self, sender, ballot_num, accepted_proposals):
if ballot_num == self.ballot_num:
self.logger.info("got matching promise; need %d" % self.quorum)
self.update_accepted(accepted_proposals)
self.acceptors.add(sender)
if len(self.acceptors) >= self.quorum:
# strip the ballot numbers from self.accepted_proposals, now that it
# represents a majority
accepted_proposals = dict((s, p) for s, (b, p) in list(self.accepted_proposals.items()))
# We're adopted; note that this does *not* mean that no other leader is active.
# Any such conflicts will be handled by the commanders.
self.node.send([self.node.address],
Adopted(ballot_num=ballot_num, accepted_proposals=accepted_proposals))
self.stop()
else:
# this acceptor has promised another leader a higher ballot number, so we've lost
self.node.send([self.node.address], Preempted(slot=None, preempted_by=ballot_num))
self.stop()
class Leader(Role):
def __init__(self, node, peers, commander_cls=Commander, scout_cls=Scout):
super(Leader, self).__init__(node)
self.ballot_num = Ballot(0, node.address)
self.active = False
self.proposals = {}
self.commander_cls = commander_cls
self.scout_cls = scout_cls
self.scouting = False
self.peers = peers
def start(self):
        # remind others that we're active before LEADER_TIMEOUT expires
def active():
if self.active:
self.node.send(self.peers, Active())
self.set_timer(LEADER_TIMEOUT / 2.0, active)
active()
def spawn_scout(self):
assert not self.scouting
self.scouting = True
self.scout_cls(self.node, self.ballot_num, self.peers).start()
def do_Adopted(self, sender, ballot_num, accepted_proposals):
self.scouting = False
self.proposals.update(accepted_proposals)
# note that we don't re-spawn commanders here; if there are undecided
# proposals, the replicas will re-propose
self.logger.info("leader becoming active")
self.active = True
def spawn_commander(self, ballot_num, slot):
proposal = self.proposals[slot]
self.commander_cls(self.node, ballot_num, slot, proposal, self.peers).start()
def do_Preempted(self, sender, slot, preempted_by):
if not slot: # from the scout
self.scouting = False
self.logger.info("leader preempted by %s", preempted_by.leader)
self.active = False
self.ballot_num = Ballot((preempted_by or self.ballot_num).n + 1, self.ballot_num.leader)
def do_Propose(self, sender, slot, proposal):
if slot not in self.proposals:
if self.active:
self.proposals[slot] = proposal
self.logger.info("spawning commander for slot %d" % (slot,))
self.spawn_commander(self.ballot_num, slot)
else:
if not self.scouting:
self.logger.info("got PROPOSE when not active - scouting")
self.spawn_scout()
else:
self.logger.info("got PROPOSE while scouting; ignored")
else:
self.logger.info("got PROPOSE for a slot already being proposed")
class Bootstrap(Role):
def __init__(self, node, peers, execute_fn,
replica_cls=Replica, acceptor_cls=Acceptor, leader_cls=Leader,
commander_cls=Commander, scout_cls=Scout):
super(Bootstrap, self).__init__(node)
self.execute_fn = execute_fn
self.peers = peers
self.peers_cycle = itertools.cycle(peers)
self.replica_cls = replica_cls
self.acceptor_cls = acceptor_cls
self.leader_cls = leader_cls
self.commander_cls = commander_cls
self.scout_cls = scout_cls
def start(self):
self.join()
def join(self):
self.node.send([next(self.peers_cycle)], Join())
self.set_timer(JOIN_RETRANSMIT, self.join)
def do_Welcome(self, sender, state, slot, decisions):
self.acceptor_cls(self.node)
self.replica_cls(self.node, execute_fn=self.execute_fn, peers=self.peers,
state=state, slot=slot, decisions=decisions)
self.leader_cls(self.node, peers=self.peers, commander_cls=self.commander_cls,
scout_cls=self.scout_cls).start()
self.stop()
class Seed(Role):
def __init__(self, node, initial_state, execute_fn, peers, bootstrap_cls=Bootstrap):
super(Seed, self).__init__(node)
self.initial_state = initial_state
self.execute_fn = execute_fn
self.peers = peers
self.bootstrap_cls = bootstrap_cls
self.seen_peers = set([])
self.exit_timer = None
def do_Join(self, sender):
self.seen_peers.add(sender)
if len(self.seen_peers) <= len(self.peers) / 2:
return
# cluster is ready - welcome everyone
self.node.send(list(self.seen_peers), Welcome(
state=self.initial_state, slot=1, decisions={}))
# stick around for long enough that we don't hear any new JOINs from
# the newly formed cluster
if self.exit_timer:
self.exit_timer.cancel()
self.exit_timer = self.set_timer(JOIN_RETRANSMIT * 2, self.finish)
def finish(self):
# bootstrap this node into the cluster we just seeded
bs = self.bootstrap_cls(self.node, peers=self.peers, execute_fn=self.execute_fn)
bs.start()
self.stop()
class Requester(Role):
client_ids = itertools.count(start=100000)
def __init__(self, node, n, callback):
super(Requester, self).__init__(node)
self.client_id = next(self.client_ids)
self.n = n
self.output = None
self.callback = callback
def start(self):
self.node.send([self.node.address], Invoke(caller=self.node.address,
client_id=self.client_id, input_value=self.n))
self.invoke_timer = self.set_timer(INVOKE_RETRANSMIT, self.start)
def do_Invoked(self, sender, client_id, output):
if client_id != self.client_id:
return
self.logger.debug("received output %r" % (output,))
self.invoke_timer.cancel()
self.callback(output)
self.stop()
class Member(object):
def __init__(self, state_machine, network, peers, seed=None,
seed_cls=Seed, bootstrap_cls=Bootstrap):
self.network = network
self.node = network.new_node()
if seed is not None:
self.startup_role = seed_cls(self.node, initial_state=seed, peers=peers,
execute_fn=state_machine)
else:
self.startup_role = bootstrap_cls(self.node, execute_fn=state_machine, peers=peers)
self.requester = None
def start(self):
self.startup_role.start()
self.thread = threading.Thread(target=self.network.run)
self.thread.start()
def invoke(self, input_value, request_cls=Requester):
assert self.requester is None
q = queue.Queue()
self.requester = request_cls(self.node, input_value, q.put)
self.requester.start()
output = q.get()
self.requester = None
return output
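# Illustrative sketch (not part of the original module): `execute_fn` / `state_machine` is
# expected to be a pure function (state, input) -> (new_state, output), as used in
# Replica.commit. A hypothetical key/value example:
#
#   def kv_state_machine(state, op):
#       # op is assumed to be ('get', key) or ('set', key, value)
#       if op[0] == 'get':
#           return state, state.get(op[1])
#       return dict(state, **{op[1]: op[2]}), op[2]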
|
wavingdetected.py
|
# DOCUMENTATION
# http://doc.aldebaran.com/2-5/naoqi/peopleperception/alengagementzones-api.html#alengagementzones-api
import qi
import argparse
import sys
import os
import time
import threading
import math
from naoqi import ALProxy
import conditions
from conditions import set_condition
waving_person_id = 0
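# Callback fired whenever the external waving detector publishes a new detection: it reads the
# wave bounding box from ALMemory, then matches it against the faces currently tracked by
# ALPeoplePerception to find the visible person closest to the waving location.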
def waving_callback(data):
global wavingdetected
global wavingpersonid
global wavingpersonx
global wavingpersony
    # data = [category, confidence, xmin, ymin, xmax, ymax]
confidence = memory_service.getData("Actions/PeopleWaving/person01/WaveProbability")
xmin = memory_service.getData("Actions/PeopleWaving/person01/BBox/Xmin")
xmax = memory_service.getData("Actions/PeopleWaving/person01/BBox/Xmax")
ymin = memory_service.getData("Actions/PeopleWaving/person01/BBox/Ymin")
ymax = memory_service.getData("Actions/PeopleWaving/person01/BBox/Ymax")
x_wave = (float(xmax) - float(xmin)) / 2
y_wave = (float(ymax) - float(ymin)) / 4 + float(ymin)
# compute the closest person identified to the waving coordinates
plist = memory_service.getData("PeoplePerception/VisiblePeopleList")
max_distance = 100000000
closest_waving_person_id = 0
print " [ WAVING DETECTED ]"
for personid in plist:
pmemkey_isface = "PeoplePerception/Person/"+str(personid)+"/IsFaceDetected"
try:
data = memory_service.getData(pmemkey_isface)
if data == 0:
print " Person ID: ",personid , "- NO Face detected "
else:
pmemkey_face = "PeoplePerception/Person/"+str(personid)+"/FacialPartsProperties"
data = memory_service.getData(pmemkey_face)
# Upper mouth coordinates
y_person_mouth = data[11][0][1]
x_person_mouth = data[11][0][0]
distance = math.sqrt(math.pow(x_wave-x_person_mouth,2) + math.pow(y_wave-y_person_mouth,2))
print " Person ID: ",personid , "- Pixel Dist: ",distance, "- Confidence: ", confidence
if distance < max_distance:
closest_waving_person_id = personid
max_distance = distance
except:
print " Person ID: ",personid , "- NO Face detected "
if closest_waving_person_id == 0:
print "\n"
print " [ NO PERSON ID WAVING fOUND ]"
print "\n"
wavingdetected = 0
else:
pmemkey_pos = "PeoplePerception/Person/"+str(closest_waving_person_id)+"/PositionInRobotFrame"
data = memory_service.getData(pmemkey_pos)
print " [ PERSON WAVING ]"
print " Person ID: ", closest_waving_person_id
print " Person X: ", data[1] #still dont know if this is the correct one
print " Person Y: ", data[2] #still dont know if this is the correct one
print "\n"
wavingpersonid = closest_waving_person_id
wavingpersonx = data[1]
wavingpersony = data[2]
wavingdetected = 1
def rhMonitorThread (memory_service):
global wavingdetected
global wavingpersonid
global wavingpersonx
global wavingpersony
wavingdetected = 0
t = threading.currentThread()
print "personhere thread started"
while getattr(t, "do_run", True):
plist = memory_service.getData("PeoplePerception/VisiblePeopleList")
v = 'false'
wavingDetection = memory_service.subscriber("Actions/PeopleWaving/NewDetection")
idAnyDetection = wavingDetection.signal.connect(waving_callback)
if wavingdetected == 1:
memory_service.insertData('Actions/wavingdetected/wavingpersonid',str(wavingpersonid))
memory_service.insertData('Actions/wavingdetected/wavingpersonx',str(wavingpersonx))
memory_service.insertData('Actions/wavingdetected/wavingpersony',str(wavingpersony))
v = 'true'
set_condition(memory_service,'wavingdetected',v)
time.sleep(1)
print "personhere thread quit"
def init(session):
global memory_service
global monitorThread
print "Waving detection init"
#Starting services
memory_service = session.service("ALMemory")
zones_service = session.service("ALEngagementZones")
people_service = session.service("ALPeoplePerception")
print "Creating the thread"
#create a thead that monitors directly the signal
monitorThread = threading.Thread(target = rhMonitorThread, args = (memory_service,))
monitorThread.start()
def quit():
global monitorThread
print "Waving detection quit"
monitorThread.do_run = False
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from electrum import constants
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from datetime import datetime
from pytz import timezone
from tzlocal import get_localzone
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor,QDoubleValidator
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QModelIndex, QItemSelectionModel
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
                             QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel, QLayout,
                             QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
                             QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QFrame,
                             QShortcut, QMainWindow, QCompleter, QInputDialog,
                             QWidget, QMenu, QSizePolicy, QStatusBar, QListView, QAbstractItemView,
                             QSpacerItem, QListWidget, QListWidgetItem)
from PyQt5.QtGui import QStandardItemModel, QStandardItem,QFont
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .betting_history_list import (BettingHistoryList, BettingHistoryModel)
from .update_checker import UpdateCheck, UpdateCheckThread
from electrum.bet import PeerlessBet
from PyQt5 import QtWidgets
from .event_list import EventListView
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.hbox=QHBoxLayout()
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.events_data = ''
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.betting_tab = self.create_betting_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
tabs.addTab(self.betting_tab, read_QIcon("tab_send.png"), _('Betting'))
tabs.addTab(self.create_betting_history_tab(), read_QIcon("tab_history.png"), _('Betting History'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
self.betting_history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
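        # Called from a network callback (not the GUI thread); clear the cache and
        # re-emit as a Qt signal so the actual GUI refresh runs on the GUI thread.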
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
self.betting_history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
            t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
        # Once the GUI is initialized, check whether anything needs to be announced,
        # since network callbacks may have fired before the GUI existed.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
# try:
# self.events_data = self.network.run_from_another_thread(self.network.get_events_list(timeout=3))
# print('Event List: ', self.events_data)
# except Exception as e:
# self.show_message(_("Error getting event list from network") + ":\n" + str(e))
# return
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
        except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-Wagerr Testnet" if constants.net.TESTNET else "Electrum-Wagerr"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
        except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # 'Settings'/'Preferences' are reserved menu item names on macOS; use a distinct label as a workaround.
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('wagerr:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
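    # e.g. filename = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    # opens in the directory stored under the 'io_dir' config key and updates it afterwards.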
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
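        # Keep the coin and fiat amount edits in sync. The `follows` flag guards
        # against the two textChanged handlers re-triggering each other forever.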
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.betting_history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
try:
self.events_data = self.network.run_from_another_thread(self.network.get_events_list(timeout=3))
#print('Event List: ', self.events_data)
except Exception as e:
self.logger.info(f'Error getting event list from network: {repr(e)}')
return
self.events_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_betting_history_tab(self):
self.betting_history_model = BettingHistoryModel(self)
self.betting_history_list = l = BettingHistoryList(self, self.betting_history_model)
self.betting_history_list.setStyleSheet(
"QTreeView {"
"show-decoration-selected: 1;"
"}"
"QTreeView::item {"
"border: 1px solid #d9d9d9;"
"border-top-color: transparent;"
"border-bottom-color: transparent;"
"padding: 5px;"
"}"
"QTreeView::item:hover {"
"background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #e7effd, stop: 1 #cbdaf1);"
"border: 1px solid #bfcde4;"
"}"
"QTreeView::item:selected:active{"
"background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #6ea1f1, stop: 1 #567dbc);"
"}"
"QTreeView::item:selected:!active {"
"background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #6b9be8, stop: 1 #577fbf);"
"}"
)
self.betting_history_list.setAlternatingRowColors(True)
self.betting_history_model.set_view(self.betting_history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Wagerr address where the payment should be received. Note that each payment request uses a different Wagerr address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Wagerr addresses.'),
            _('The Wagerr address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
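        # Build a BIP21-style payment URI for the saved request; the extra 'time',
        # 'exp', 'name' and 'sig' query parameters carry the creation time, expiry
        # and, for signed requests, the alias name and base58-encoded signature.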
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
        expiration = expiration_values[i][1]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Wagerr address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Wagerr address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Wagerr transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
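        # fee_cb is the FeeSlider callback: it persists the chosen fee setting
        # (depth/fee level for dynamic fees, fee_per_kb otherwise) and refreshes
        # the fee-rate field and the rest of the send form.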
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
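        # entry_changed recolours the amount/fee/feerate fields: red when there are
        # not enough funds, blue for auto-filled values, default for user-entered ones.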
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def create_betting_tab(self):
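        # Betting tab layout: an EventListView on the left and a "bet slip" column
        # (header label, clear button and the list of queued bets) on the right.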
        self.grid_betting = QGridLayout()
        #self.grid_betting.setColumnStretch(2,4)
        self.grid_betting.setColumnStretch(0, 4)
        self.grid_betting.setColumnStretch(1, 6)
self.eventQListWidget = QListWidget()
#self.eventQListWidget.setFixedWidth(900)
self.eventQListWidget.setMinimumWidth(800)
self.eventQListWidget.setStyleSheet(" QListWidget::item {margin: 5px; border: 1px solid grey }")
self.betQListWidget = QListWidget()
self.betQListWidget.setFixedWidth(320)
self.betQListWidget.setStyleSheet(
"QListWidget::item {"
"border: 1px solid #d9d9d9;"
#"border-top-color: transparent;"
#"border-bottom-color: transparent;"
"margin: 5px;"
"}"
"QListWidget::item:hover {"
"background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #e7effd, stop: 1 #cbdaf1);"
"border: 1px solid #bfcde4;"
"}"
"QListWidget::item:selected:active{"
"background: rgb(250, 218, 221);"
"}"
# "QListWidget::item:selected:!active {"
# "background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #6b9be8, stop: 1 #6ea1f1);"
# "}"
)
self.bet_slip=QLabel("BET SLIP")
self.bet_slip.setStyleSheet("QLabel { background-color : rgb(250, 218, 221); }")
self.clear_slip=QPushButton("CLEAR SLIP")
self.clear_slip.clicked.connect(self.Clear_Clicked)
self.hbox_slip=QHBoxLayout()
self.hbox_slip.addWidget(self.bet_slip)
self.hbox_slip.addWidget(self.clear_slip)
self.hbox_slip.setAlignment(Qt.AlignTop)
self.betting_grid = grid = QGridLayout()
self.vbox_b=QVBoxLayout()
self.vbox_b.addLayout(self.hbox_slip)
self.events_list = EventListView(self)
self.events_list.setFixedWidth(150)
#self.events_list.setMinimumWidth(150)
self.w = QWidget()
        self.grid_betting.addWidget(self.events_list, 0, 0)
        self.grid_betting.addLayout(self.vbox_b, 0, 2)
        #self.grid_betting.setColumnMinimumWidth(1,1120)
        self.grid_betting.setColumnMinimumWidth(2, 150)
self.w.setLayout(self.grid_betting)
#self.w.setMinimumSize(800, 800)
run_hook('create_betting_tab', grid)
return self.w
def Clear_Clicked(self):
self.betQListWidget.clear()
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
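        # Returns either a fixed absolute fee (int, when the fee field is frozen),
        # a callable mapping tx size to fee (when the feerate field is frozen),
        # or None to let the wallet use the configured defaults.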
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def read_bet_tab(self,a):
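        '''Collect the outputs, fee estimator and coins for a peerless bet.
        `a` is the bet-slip entry widget; it exposes eventIdToBetOn, betOutcome
        and editBettingAmount for the selected event.'''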
        label = 'Betting Transaction'
        eventId = int(a.eventIdToBetOn)
        outcome = int(a.betOutcome)
        amount = int(a.editBettingAmount.get_amount())
        self.logger.debug(f'bet event_id={eventId} outcome={outcome} amount={amount}')
        pb = PeerlessBet(eventId, outcome)
        isPeerlessBet, opCode = PeerlessBet.ToOpCode(pb)
        if not isPeerlessBet:
            raise Exception('Error converting PeerlessBet to opcode')
        self.logger.debug(f'bet opcode: {opCode}')
        outputs = [TxOutput(bitcoin.TYPE_BET, opCode, amount)]
        fee_estimator = self.get_send_fee_estimator()
        coins = self.get_coins()
        return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Wagerr Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Wagerr Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview = True)
    def do_bet(self, a, preview=False):
if run_hook('abort_bet', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_bet_tab(a)
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
#print('do_bet calling make_unsigned_transaction')
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
#print('do_bet calling make_unsigned_transaction done')
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
def do_send(self, preview=False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
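# If this payment settles a BIP70 payment request, mark the invoice as paid and send the payment ACK back to the requestor.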
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
#self.eventid_e.is_pr = False
# for e in [self.eventid_e, self.outcome_e, self.betting_amount_e]:
# e.setText('')
# e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.betting_history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.betting_history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.betting_history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
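# Expose every public Commands method in the console namespace, binding the GUI password dialog so protected commands can prompt the user.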
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Wagerr address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Wagerr address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
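# Private keys are decrypted on a background thread; the two signals update the dialog text while keys are being computed and once the full list is ready.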
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
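# The Sweep button stays disabled until a valid destination address and at least one parseable private key have been entered.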
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
self.betting_history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.betting_history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.betting_history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 WGR = 1000 mWGR.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
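# Switching the base unit changes the global decimal point, rescales the amount fields in place and refreshes the history, request and address lists.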
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.betting_history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
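# The helpers below keep the fiat settings widgets (currency, history rates, capital gains, fiat address balance, exchange source) in sync with the fx plugin state.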
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
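# Each plugin gets a checkbox; plugins that declare settings also get a settings widget which is enabled only while the plugin itself is enabled.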
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
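# Update the displayed output amount, combined fee and combined feerate whenever the child fee field changes.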
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
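# Given a target combined feerate, compute the fee the child must pay on top of the parent's fee, clamped to at least 1 sat/byte of the combined size and at most the value available in the child transaction.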
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
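# Illustrative example (made-up numbers): with total_size = 300 vbytes, parent_fee = 150 sat
# and a target rate of 10000 sat/kvB (10 sat/vbyte), the child fee is
# 10000 * 300 / 1000 - 150 = 2850 sat, then clamped to at most max_fee and at least
# total_size (roughly 1 sat/byte over the combined size).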
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s' % self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
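# Note: old_fee_rate is in sat/vbyte, while format_fee_rate appears to expect sat/kvB,
# hence the factor of 1000 above.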
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
drum_server_utils.py
|
import os
import psutil
import requests
import signal
import time
from threading import Thread
from datarobot_drum.drum.utils import CMRunnerUtils
from datarobot_drum.drum.common import ArgumentsOptions, ArgumentOptionsEnvVars
from datarobot_drum.resource.utils import _exec_shell_cmd, _cmd_add_class_labels
def _wait_for_server(url, timeout, process_holder):
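# Poll the URL once per second until it responds OK; if `timeout` seconds pass,
# kill the server's process group (when provided) and fail with an assertion.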
# waiting for ping to succeed
while True:
try:
response = requests.get(url)
if response.ok:
break
except Exception:
pass
time.sleep(1)
timeout -= 1
if timeout <= 0:
if process_holder is not None:
print("Killing subprocess: {}".format(process_holder.process.pid))
os.killpg(os.getpgid(process_holder.process.pid), signal.SIGTERM)
time.sleep(0.25)
os.killpg(os.getpgid(process_holder.process.pid), signal.SIGKILL)
assert timeout, "Server failed to start: url: {}".format(url)
def _run_server_thread(cmd, process_obj_holder, verbose=True):
_exec_shell_cmd(
cmd,
"Failed in {} command line! {}".format(ArgumentsOptions.MAIN_COMMAND, cmd),
assert_if_fail=False,
process_obj_holder=process_obj_holder,
verbose=verbose,
)
class DrumServerProcess:
def __init__(self):
self.process = None
self.out_stream = None
self.err_stream = None
@property
def returncode(self):
return self.process.returncode
class DrumServerRun:
"""
Utility to help run a local drum prediction server for tests and prediction validation
"""
def __init__(
self,
target_type,
labels,
custom_model_dir,
docker=None,
with_error_server=False,
show_stacktrace=True,
nginx=False,
memory=None,
fail_on_shutdown_error=True,
pass_args_as_env_vars=False,
verbose=True,
append_cmd=None,
):
port = CMRunnerUtils.find_free_port()
self.server_address = "localhost:{}".format(port)
url_host = os.environ.get("TEST_URL_HOST", "localhost")
if docker:
self.url_server_address = "http://{}:{}".format(url_host, port)
else:
self.url_server_address = "http://localhost:{}".format(port)
cmd = "{} server".format(ArgumentsOptions.MAIN_COMMAND)
if pass_args_as_env_vars:
os.environ[ArgumentOptionsEnvVars.CODE_DIR] = str(custom_model_dir)
os.environ[ArgumentOptionsEnvVars.TARGET_TYPE] = target_type
os.environ[ArgumentOptionsEnvVars.ADDRESS] = self.server_address
else:
cmd += " --code-dir {} --target-type {} --address {}".format(
custom_model_dir, target_type, self.server_address
)
if labels:
cmd = _cmd_add_class_labels(
cmd, labels, target_type=target_type, pass_args_as_env_vars=pass_args_as_env_vars
)
if docker:
cmd += " --docker {}".format(docker)
if memory:
cmd += " --memory {}".format(memory)
if with_error_server:
if pass_args_as_env_vars:
os.environ[ArgumentOptionsEnvVars.WITH_ERROR_SERVER] = "1"
else:
cmd += " --with-error-server"
if show_stacktrace:
if pass_args_as_env_vars:
os.environ[ArgumentOptionsEnvVars.SHOW_STACKTRACE] = "1"
else:
cmd += " --show-stacktrace"
if nginx:
if pass_args_as_env_vars:
os.environ[ArgumentOptionsEnvVars.PRODUCTION] = "1"
else:
cmd += " --production"
if append_cmd is not None:
cmd += " " + append_cmd
self._cmd = cmd
self._process_object_holder = DrumServerProcess()
self._server_thread = None
self._with_nginx = nginx
self._fail_on_shutdown_error = fail_on_shutdown_error
self._verbose = verbose
def __enter__(self):
self._server_thread = Thread(
target=_run_server_thread, args=(self._cmd, self._process_object_holder, self._verbose)
)
self._server_thread.start()
time.sleep(0.5)
_wait_for_server(
self.url_server_address, timeout=10, process_holder=self._process_object_holder
)
return self
def _shutdown_server(self):
if not self._with_nginx:
response = requests.post(self.url_server_address + "/shutdown/")
assert response.ok
time.sleep(1)
self._server_thread.join()
else:
# When running with nginx:
# this test starts the drum process with the --docker option,
# and that process starts the drum server inside docker.
# The nginx server doesn't have a shutdown API, so we have to kill it.
# This loop kills the whole process chain except for docker itself.
parent = psutil.Process(self._process_object_holder.process.pid)
for child in parent.children(recursive=True):
child.kill()
parent.kill()
# this kills drum running in the docker
for proc in psutil.process_iter():
if "{}".format(ArgumentsOptions.MAIN_COMMAND) in proc.name().lower():
print(proc.cmdline())
if "{}".format(ArgumentsOptions.SERVER) in proc.cmdline():
if "--production" in proc.cmdline():
try:
proc.terminate()
time.sleep(0.3)
proc.kill()
except psutil.NoSuchProcess:
pass
break
self._server_thread.join(timeout=5)
def __exit__(self, exc_type, exc_val, exc_tb):
# shutdown server
if self._fail_on_shutdown_error:
self._shutdown_server()
else:
try:
self._shutdown_server()
except Exception:
pass
@property
def process(self):
return self._process_object_holder or None
|
main.py
|
"""This module contains the overall UI frame object and is responsible for launching it."""
from helper import wipe_prediction_input_images, update_dropdown, filter_city, initialize_cities
from label import ImageLabel
from detect import Detection
from debug import QTextEditLogger
from PyQt5.QtWidgets import (
QWidget,
QPushButton,
QStatusBar,
QMenuBar,
QMessageBox,
QComboBox,
QApplication,
QMainWindow,
QStackedWidget,
QLineEdit,
QCheckBox,
QSizePolicy,
QSystemTrayIcon
)
from PyQt5 import QtGui
from PyQt5.QtMultimedia import QCamera, QCameraInfo
from PyQt5.QtMultimediaWidgets import QCameraViewfinder
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QCoreApplication, QRect, QMetaObject
from api_communication.api_handler import get_downloaded_model, get_dwh_model_version, \
get_supported_cities, send_city_request, send_new_image
from datetime import datetime
import shutil
import sys
import os
from pathlib import Path
import time
import logging
from threading import Thread
OUTPUT_PREDICTION_DIR = "./runs/detect/"
INPUT_PREDICTION_DIR = "./data/images"
START = "Start Detection"
STOP = "Stop Detection"
ENABLE = "Enable File Drop"
DISABLE = "Disable File Drop"
WINDOW = "MainWindow"
logo_without_text = "icon_logo.png"
logo_with_text = "logo.png"
loading_image = "loading_image.png"
class UiMainWindow(QWidget):
"""Main UI window of the application.
Attributes:
----------
window_width: int
Width of the window
window_height: int
Height of the window
button_width: int
Width of buttons
button_height: int
Height of buttons
dist: int
Distance to the edge of widgets (window/button/label, ...)
model_selected: bool
Shows whether a model is selected or not
"""
window_height = 650
window_width = 800
button_width = 180
button_height = 50
dist = 30
model_selected = False
textbox_height = 25
small_button_width = 100
small_button_height = 30
debug_height = 200
debug_mode = False
accepted_download = False
current_city = ""
def __init__(self, parent) -> None:
super().__init__(parent)
main_window.setObjectName("main_window")
main_window.resize(self.window_width, self.window_height)
self.centralwidget = QWidget(main_window)
self.centralwidget.setObjectName("centralwidget")
self.detector = Detection()
self.Box_Stadt = QComboBox(self.centralwidget)
self.Box_Stadt.setGeometry(QRect(self.dist, self.dist, self.button_width, self.button_height))
self.Box_Stadt.setObjectName("Box_Stadt")
self.Box_Stadt.activated.connect(self.on_dropdown_selected)
# dynamic city updates: a daemon thread keeps the dropdown in sync with the backend's supported cities
supported_cities_updater = Thread(target=update_dropdown, daemon=True, args=(self.Box_Stadt,))
supported_cities_updater.start()
self.Text_City = QLineEdit(self.centralwidget)
self.Text_City.setGeometry(
QRect(self.dist + self.dist + self.button_width, self.dist + 10,
self.button_width, self.textbox_height))
self.Text_City.setObjectName("Text_City")
self.Text_City.setToolTip(
'Enter a city you wish to detect sights in if it does not appear in the dropdown on the left after updating.')
self.Button_City = QPushButton(self.centralwidget)
self.Button_City.setGeometry(
QRect(
int(2.3 * self.dist) + self.button_width + self.button_width, self.dist + 8,
self.small_button_width, self.small_button_height)
)
self.Button_City.setObjectName("Button_City")
self.Button_City.clicked.connect(self.request_city)
self.Button_Detection = QPushButton(self.centralwidget)
self.Button_Detection.setGeometry(
QRect(
self.window_width - (self.dist + self.button_width),
self.window_height - (self.dist + self.button_height + 20),
self.button_width,
self.button_height,
)
)
self.Button_Detection.setObjectName("Button_Detection")
self.Button_Detection.clicked.connect(self.detect_sights)
self.Button_Bild = QPushButton(self.centralwidget)
self.Button_Bild.setGeometry(
QRect(
self.dist,
self.window_height - (self.dist + self.button_height + 20),
self.button_width,
self.button_height,
)
)
self.Button_Bild.setObjectName("Button_Bild")
self.Button_Bild.clicked.connect(lambda: self.camera_viewfinder.hide())
self.Button_Bild.clicked.connect(lambda: self.Box_Camera_selector.setCurrentIndex(0))
self.Button_Bild.clicked.connect(lambda: self.stacked_widget.setCurrentIndex(0))
self.Button_Bild.clicked.connect(lambda: self.Label_Bild.show())
self.Button_Bild.clicked.connect(self.dragdrop)
self.available_cameras = QCameraInfo.availableCameras()
self.Box_Camera_selector = QComboBox(self.centralwidget)
self.Box_Camera_selector.setGeometry(
QRect(
self.window_width - (self.dist + self.button_width),
self.dist,
self.button_width,
self.button_height,
)
)
self.Box_Camera_selector.setObjectName("Box_Camera_selector")
self.Box_Camera_selector.addItem("")
# self.Box_Camera_selector.addItems([camera.description() for camera in self.available_cameras])
self.Box_Camera_selector.addItems(
["Camera " + str(i) + ": " + str(self.available_cameras[i].description()) for i in
range(len(self.available_cameras))])
self.Box_Camera_selector.currentIndexChanged.connect(self.select_camera)
self.stacked_widget = QStackedWidget(self.centralwidget)
label_height = (self.window_height - self.dist - self.button_height - self.dist) - (
self.dist + self.button_height + self.dist
)
label_start_y = self.dist + self.button_height + self.dist
self.stacked_widget.setGeometry(
QRect(
self.dist,
label_start_y,
self.window_width - (self.dist * 2),
label_height,
)
)
self.camera_viewfinder = QCameraViewfinder()
self.Label_Bild = ImageLabel(self)
self.Label_Bild.setGeometry(QRect(0, 0, self.window_width - (self.dist * 2), label_height))
self.checkBoxImprove = QCheckBox("Help improving SightScan's detection quality", self.centralwidget)
self.checkBoxImprove.setObjectName(u"improvement")
self.checkBoxImprove.setGeometry(
QRect(
self.dist,
5,
350,
20)
)
self.checkBoxImprove.setChecked(False)
self.checkBoxImprove.stateChanged.connect(self.set_improve_quality_var)
self.checkBox = QCheckBox("Debug", self.centralwidget)
self.checkBox.setObjectName(u"checkBox")
self.checkBox.setGeometry(
QRect(
self.window_width - (self.dist + 50),
self.window_height - (self.dist + 20),
70,
20)
)
self.checkBox.setChecked(False)
self.checkBox.stateChanged.connect(self.debug_click)
# Setup logging
fn = "logs/" + datetime.now().strftime('%d_%m_%Y__%H_%M_%S') + 'log.log'
if not os.path.exists("logs"):
os.mkdir("logs")
f = '%(asctime)s :: %(levelname)s :: %(filename)s :: %(funcName)s :: %(lineno)d :: %(message)s'
self.textDebug = QTextEditLogger(self.centralwidget)
self.textDebug.setFormatter(logging.Formatter(f))
logging.basicConfig(filename=fn, format=f, level=logging.DEBUG)
logging.getLogger().addHandler(self.textDebug)
# Log Text Box in GUI
self.textDebug.widget.setObjectName(u"textEdit")
self.textDebug.widget.setEnabled(False)
self.textDebug.widget.setGeometry(
QRect
(
self.dist,
self.window_height,
self.window_width - 2 * self.dist,
self.debug_height - self.dist
)
)
size_policy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
size_policy.setHorizontalStretch(0)
size_policy.setVerticalStretch(0)
size_policy.setHeightForWidth(self.textDebug.widget.sizePolicy().hasHeightForWidth())
self.textDebug.widget.setSizePolicy(size_policy)
self.textDebug.widget.setReadOnly(True)
self.stacked_widget.addWidget(self.Label_Bild)
self.stacked_widget.addWidget(self.camera_viewfinder)
main_window.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(main_window)
self.menubar.setGeometry(QRect(0, 0, 678, 21))
self.menubar.setObjectName("menubar")
main_window.setMenuBar(self.menubar)
self.statusbar = QStatusBar(main_window)
self.statusbar.setObjectName("statusbar")
main_window.setStatusBar(self.statusbar)
main_window.setWindowIcon(QIcon(logo_without_text))
self.retranslateUi(main_window)
QMetaObject.connectSlotsByName(main_window)
def set_improve_quality_var(self):
self.improve_checkbox_enabled = self.checkBoxImprove.isChecked()
def retranslateUi(self, main_window: QMainWindow) -> None:
"""Set the text initially for all items.
Parameters
----------
main_window: QMainWindow
The instance of the prepared application window
"""
_translate = QCoreApplication.translate
main_window.setWindowTitle(_translate(WINDOW, "SightScan"))
self.Box_Stadt.addItems(['Choose City'] + initialize_cities())
self.Box_Camera_selector.setItemText(0, _translate(WINDOW, "Choose Webcam"))
self.Button_Detection.setText(_translate(WINDOW, START))
self.Button_Bild.setText(_translate(WINDOW, ENABLE))
self.Button_City.setText(_translate(WINDOW, "Add City"))
def on_dropdown_selected(self) -> None:
"""Shows a pop-up for confirming the download of the selected city."""
city_pretty_print = self.Box_Stadt.currentText()
city = self.Box_Stadt.currentText().replace(' ', '_').upper()
if city != "CHOOSE_CITY":
self.current_city = self.Box_Stadt.currentText()
# if no connection to dos
if get_supported_cities() == []:
latest_version = "couldn't get the latest version"
downloaded_version = "couldn't get the downloaded version"
print('no connection to dos')
# if connection to dos
else:
downloaded_version = -1 # initialization
Path("weights").mkdir(mode=0o700, exist_ok=True)
if not os.path.exists("weights/versions.txt"):
with open('weights/versions.txt', 'w'): # creating a version file
pass
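# versions.txt stores one "CITY_NAME=version" entry per line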
with open("weights/versions.txt", "r") as file:
for line in file:
elements = line.split("=")
if elements[0].upper() == city:
downloaded_version = int(elements[1])
break
latest_version = get_dwh_model_version(city)
if downloaded_version == -1:
msg = QMessageBox()
msg.setWindowTitle("Download City")
msg.setWindowIcon(QIcon(logo_without_text))
msg.setText("Do you want to download " + city_pretty_print + "?")
msg.setIcon(QMessageBox.Question)
msg.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.setInformativeText("When downloaded, sights of " + city_pretty_print + " can be detected.")
msg.buttonClicked.connect(self.handover_city)
msg.exec_()
elif latest_version > downloaded_version:
update_msg = QMessageBox()
update_msg.setWindowTitle("Update available")
update_msg.setWindowIcon(QIcon(logo_without_text))
update_msg.setText("Do you want to download an update for " + city + "?")
update_msg.setIcon(QMessageBox.Question)
update_msg.setStandardButtons(QMessageBox.Cancel | QMessageBox.Ok)
update_msg.setDefaultButton(QMessageBox.Ok)
update_msg.setInformativeText(
"Updated cities can detect sights faster and more accurately. If you choose not to download, the " +
"detection will still work.")
update_msg.buttonClicked.connect(self.handover_city)
update_msg.exec_()
if self.accepted_download is True or latest_version == downloaded_version:
self.accepted_download = False
self.show_download_result()
self.model_selected = True
else:
self.model_selected = False
def handover_city(self, button) -> None:
"""Starts the download of the pre-trained model of the selected city.
Parameters
----------
button:
Pushed button inside the popup.
"""
if "OK" in button.text().upper():
city = self.Box_Stadt.currentText().replace(' ', '_').upper()
self.model_selected = True
model = get_downloaded_model(city)
if model is not None:
with open("weights/" + city + ".pt", "wb+") as file:
file.write(model)
self.accepted_download = True
elif "CANCEL" in button.text().upper():
self.Box_Stadt.setCurrentIndex(0)
def detect_sights(self) -> None:
"""Starts detection for the dropped image or shown webcam video
with the downloaded model and displays the results in the label."""
city = self.Box_Stadt.currentText().replace(' ', '_').upper()
print("Detection Status: " + str(self.detector.detection))
if self.model_selected is False:
self.show_missing_model_popup()
else:
# start drag&drop image detection
if self.stacked_widget.currentIndex() == 0 and self.Button_Bild.text() == DISABLE and \
self.Label_Bild.image != logo_with_text:
print(f"Starting detection of {self.Label_Bild.image}")
wipe_prediction_input_images(INPUT_PREDICTION_DIR)
shutil.copy2(self.Label_Bild.image, INPUT_PREDICTION_DIR)
self.detector.enable_detection()
self.detector.detect(self, weights='weights/' + city + '.pt', debug=self.debug_mode)
# stop video detection
elif self.stacked_widget.currentIndex() == 0 and self.Button_Detection.text() == STOP:
self.stop_video_detection()
time.sleep(2)
self.reactivate_cam()
# if webcam activated
elif self.stacked_widget.currentIndex() == 1:
if self.Button_Detection.text() == START:
self.Button_Detection.setText(QCoreApplication.translate(WINDOW, STOP))
self.Label_Bild.setStyleSheet(
"""
"""
)
print("Video Detection Started")
self.prep_video_detection()
source = self.Box_Camera_selector.currentIndex()
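# dropdown index 0 is the "Choose Webcam" placeholder, so the actual device index is source - 1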
self.detector.enable_detection()
self.detection_thread = Thread(target=self.detector.detect, args=(self,),
kwargs={'weights': 'weights/' + city + '.pt', 'source': str(source - 1),
'image_size': 704, 'debug': self.debug_mode})
self.detection_thread.start()
else:
print("Drop a File or select a Webcam!")
def show_missing_model_popup(self) -> None:
# Show Pop Up to choose a city
emsg = QMessageBox()
emsg.setWindowTitle("No city chosen")
emsg.setWindowIcon(QIcon(logo_without_text))
emsg.setText("You need to choose a city before the detection can start.")
emsg.setIcon(QMessageBox.Warning)
emsg.setStandardButtons(QMessageBox.Ok)
emsg.setDefaultButton(QMessageBox.Ok)
emsg.exec_()
def show_download_result(self) -> None:
# city_pretty_print = self.Box_Stadt.currentText()
self.model_selected = True
newest_vers_msg = QMessageBox()
newest_vers_msg.setWindowTitle("Ready for Detection!")
newest_vers_msg.setWindowIcon(QIcon(logo_without_text))
newest_vers_msg.setText("You can start detecting sights in " + self.current_city + "!")
newest_vers_msg.setStandardButtons(QMessageBox.Ok)
newest_vers_msg.setDefaultButton(QMessageBox.Ok)
newest_vers_msg.exec_()
def request_city(self) -> None:
# Send entered city to dwh and show confirmation popup if the city name is known
city_input = self.Text_City.text()
city_request = city_input.upper()
if len(filter_city(city_input)) == 1:
send_city_request(city_request)
cmsg = QMessageBox()
cmsg.setWindowTitle("Request confirmed")
cmsg.setWindowIcon(QIcon(logo_without_text))
cmsg.setText("Your request to add support for " + city_input + " has been sent to our backend.")
cmsg.setStandardButtons(QMessageBox.Ok)
cmsg.setDefaultButton(QMessageBox.Ok)
cmsg.exec_()
else:
cmsg = QMessageBox()
cmsg.setWindowTitle("Unknown city name")
cmsg.setWindowIcon(QIcon(logo_without_text))
cmsg.setText("The typed city name is not known. Please check the spelling.")
cmsg.setIcon(QMessageBox.Warning)
cmsg.setStandardButtons(QMessageBox.Ok)
cmsg.setDefaultButton(QMessageBox.Ok)
cmsg.exec_()
def dragdrop(self) -> None:
"""Enables / disables Drag&Drop of images."""
if self.Button_Bild.text() == ENABLE:
# stop video detection if active
if self.Button_Detection.text() == STOP:
self.Button_Detection.setText(QCoreApplication.translate(WINDOW, START))
self.detector.disable_detection()
self.Label_Bild.setAcceptDrops(True)
self.Label_Bild.setText("\n\n Drop Image here \n\n")
self.Label_Bild.setStyleSheet(
"""
QLabel{
border: 4px dashed #aaa
}
"""
)
self.Button_Bild.setText(QCoreApplication.translate(WINDOW, DISABLE))
elif self.Button_Bild.text() == DISABLE:
self.Label_Bild.setAcceptDrops(False)
self.Label_Bild.setText("")
self.Label_Bild.setStyleSheet("")
self.Label_Bild.image = logo_with_text
self.Label_Bild.setPixmap(QPixmap(self.Label_Bild.image))
self.Button_Bild.setText(QCoreApplication.translate(WINDOW, ENABLE))
def select_camera(self, i):
"""Starts the selected camera. If "Choose webcam" is selected, it stops the camera.
Parameters
----------
i:
Index of the chosen camera.
"""
self.Label_Bild.image = logo_with_text
self.Label_Bild.setPixmap(QPixmap(self.Label_Bild.image))
if i == 0:
self.camera.stop()
self.detector.disable_detection()
self.Button_Detection.setText(QCoreApplication.translate(WINDOW, START))
self.stacked_widget.setCurrentIndex(0)
self.camera_viewfinder.hide()
self.Label_Bild.show()
time.sleep(2)
self.Label_Bild.image = logo_with_text
self.Label_Bild.setPixmap(QPixmap(self.Label_Bild.image))
self.Label_Bild.setStyleSheet(
"""
"""
)
else:
self.camera_viewfinder.show()
self.stacked_widget.setCurrentIndex(1)
self.Label_Bild.hide()
self.camera = QCamera(self.available_cameras[i - 1])
self.camera.setViewfinder(self.camera_viewfinder)
self.camera.error.connect(lambda: self.alert(self.camera.errorString()))
self.camera.start()
self.Button_Bild.setText(QCoreApplication.translate(WINDOW, ENABLE))
def prep_video_detection(self) -> None:
self.camera.stop()
self.camera_viewfinder.hide()
self.stacked_widget.setCurrentIndex(0)
self.Label_Bild.image = loading_image
self.Label_Bild.setPixmap(QPixmap(self.Label_Bild.image))
self.Label_Bild.show()
def stop_video_detection(self) -> None:
self.Button_Detection.setText(QCoreApplication.translate(WINDOW, START))
self.detector.disable_detection()
self.stacked_widget.setCurrentIndex(1)
self.Label_Bild.hide()
self.camera_viewfinder.show()
def debug_click(self, state):
self.debug_mode = bool(state)
if state:
main_window.resize(self.window_width, self.window_height + self.debug_height)
self.textDebug.widget.setEnabled(True)
else:
main_window.resize(self.window_width, self.window_height)
self.textDebug.widget.setEnabled(False)
def reactivate_cam(self) -> None:
self.Label_Bild.image = logo_with_text
self.Label_Bild.setPixmap(QPixmap(self.Label_Bild.image))
self.camera.start()
def close_all(self) -> None:
if self.Button_Detection.text() == STOP:
self.detector.disable_detection()
self.stop_video_detection()
if __name__ == "__main__":
# starts the UI
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('logo_exe_icon.ico'))
trayIcon = QSystemTrayIcon(QtGui.QIcon(logo_without_text), app)
trayIcon.show()
main_window = QMainWindow()
ui = UiMainWindow(main_window)
app.aboutToQuit.connect(ui.close_all)
main_window.show()
sys.exit(app.exec_())
|
base.py
|
# Copyright (c) 2016, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Build performance test base classes and functionality"""
import json
import logging
import os
import re
import resource
import socket
import shutil
import time
import unittest
import xml.etree.ElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue
from xml.dom import minidom
import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo
# Get logger for this module
log = logging.getLogger('build-perf')
# Our own version of runCmd which does not raise AssertionError, which would
# cause errors to be interpreted as failures
runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
class KernelDropCaches(object):
"""Container of the functions for dropping kernel caches"""
sudo_passwd = None
@classmethod
def check(cls):
"""Check permssions for dropping kernel caches"""
from getpass import getpass
from locale import getdefaultlocale
cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
ret = runCmd2(cmd, ignore_status=True, data=b'0')
if ret.output.startswith('sudo:'):
pass_str = getpass(
"\nThe script requires sudo access to drop caches between "
"builds (echo 3 > /proc/sys/vm/drop_caches).\n"
"Please enter your sudo password: ")
cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])
@classmethod
def drop(cls):
"""Drop kernel caches"""
cmd = ['sudo', '-k']
if cls.sudo_passwd:
cmd.append('-S')
input_data = cls.sudo_passwd + b'\n'
else:
cmd.append('-n')
input_data = b''
cmd += ['tee', '/proc/sys/vm/drop_caches']
input_data += b'3'
runCmd2(cmd, data=input_data)
def str_to_fn(string):
"""Convert string to a sanitized filename"""
return re.sub(r'(\W+)', '-', string)
class ResultsJsonEncoder(json.JSONEncoder):
"""Extended encoder for build perf test results"""
unix_epoch = datetime.utcfromtimestamp(0)
def default(self, obj):
"""Encoder for our types"""
if isinstance(obj, datetime):
# NOTE: we assume that all timestamps are in UTC time
return (obj - self.unix_epoch).total_seconds()
if isinstance(obj, timedelta):
return obj.total_seconds()
return json.JSONEncoder.default(self, obj)
class BuildPerfTestResult(unittest.TextTestResult):
"""Runner class for executing the individual tests"""
# List of test cases to run
test_run_queue = []
def __init__(self, out_dir, *args, **kwargs):
super(BuildPerfTestResult, self).__init__(*args, **kwargs)
self.out_dir = out_dir
self.hostname = socket.gethostname()
self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
self.start_time = self.elapsed_time = None
self.successes = []
def addSuccess(self, test):
"""Record results from successful tests"""
super(BuildPerfTestResult, self).addSuccess(test)
self.successes.append(test)
def addError(self, test, err):
"""Record results from crashed test"""
test.err = err
super(BuildPerfTestResult, self).addError(test, err)
def addFailure(self, test, err):
"""Record results from failed test"""
test.err = err
super(BuildPerfTestResult, self).addFailure(test, err)
def addExpectedFailure(self, test, err):
"""Record results from expectedly failed test"""
test.err = err
super(BuildPerfTestResult, self).addExpectedFailure(test, err)
def startTest(self, test):
"""Pre-test hook"""
test.base_dir = self.out_dir
log.info("Executing test %s: %s", test.name, test.shortDescription())
self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
super(BuildPerfTestResult, self).startTest(test)
def startTestRun(self):
"""Pre-run hook"""
self.start_time = datetime.utcnow()
def stopTestRun(self):
"""Pre-run hook"""
self.elapsed_time = datetime.utcnow() - self.start_time
def all_results(self):
compound = [('SUCCESS', t, None) for t in self.successes] + \
[('FAILURE', t, m) for t, m in self.failures] + \
[('ERROR', t, m) for t, m in self.errors] + \
[('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
[('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
[('SKIPPED', t, m) for t, m in self.skipped]
return sorted(compound, key=lambda info: info[1].start_time)
def write_buildstats_json(self):
"""Write buildstats file"""
buildstats = OrderedDict()
for _, test, _ in self.all_results():
for key, val in test.buildstats.items():
buildstats[test.name + '.' + key] = val
with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
json.dump(buildstats, fobj, cls=ResultsJsonEncoder)
def write_results_json(self):
"""Write test results into a json-formatted file"""
results = OrderedDict([('tester_host', self.hostname),
('start_time', self.start_time),
('elapsed_time', self.elapsed_time),
('tests', OrderedDict())])
for status, test, reason in self.all_results():
test_result = OrderedDict([('name', test.name),
('description', test.shortDescription()),
('status', status),
('start_time', test.start_time),
('elapsed_time', test.elapsed_time),
('measurements', test.measurements)])
if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
test_result['message'] = str(test.err[1])
test_result['err_type'] = test.err[0].__name__
test_result['err_output'] = reason
elif reason:
test_result['message'] = reason
results['tests'][test.name] = test_result
with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
json.dump(results, fobj, indent=4,
cls=ResultsJsonEncoder)
def write_results_xml(self):
"""Write test results into a JUnit XML file"""
top = ET.Element('testsuites')
suite = ET.SubElement(top, 'testsuite')
suite.set('name', 'oeqa.buildperf')
suite.set('timestamp', self.start_time.isoformat())
suite.set('time', str(self.elapsed_time.total_seconds()))
suite.set('hostname', self.hostname)
suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
suite.set('errors', str(len(self.errors)))
suite.set('skipped', str(len(self.skipped)))
test_cnt = 0
for status, test, reason in self.all_results():
test_cnt += 1
testcase = ET.SubElement(suite, 'testcase')
testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
testcase.set('name', test.name)
testcase.set('description', test.shortDescription())
testcase.set('timestamp', test.start_time.isoformat())
testcase.set('time', str(test.elapsed_time.total_seconds()))
if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
if status in ('FAILURE', 'EXPECTED_FAILURE'):
result = ET.SubElement(testcase, 'failure')
else:
result = ET.SubElement(testcase, 'error')
result.set('message', str(test.err[1]))
result.set('type', test.err[0].__name__)
result.text = reason
elif status == 'SKIPPED':
result = ET.SubElement(testcase, 'skipped')
result.text = reason
elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
raise TypeError("BUG: invalid test status '%s'" % status)
for data in test.measurements.values():
measurement = ET.SubElement(testcase, data['type'])
measurement.set('name', data['name'])
measurement.set('legend', data['legend'])
vals = data['values']
if data['type'] == BuildPerfTestCase.SYSRES:
ET.SubElement(measurement, 'time',
timestamp=vals['start_time'].isoformat()).text = \
str(vals['elapsed_time'].total_seconds())
attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
ET.SubElement(measurement, 'iostat', attrib=attrib)
attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
ET.SubElement(measurement, 'rusage', attrib=attrib)
elif data['type'] == BuildPerfTestCase.DISKUSAGE:
ET.SubElement(measurement, 'size').text = str(vals['size'])
else:
raise TypeError('BUG: unsupported measurement type')
suite.set('tests', str(test_cnt))
# Use minidom for pretty-printing
dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
dom_doc.writexml(fobj, addindent=' ', newl='\n', encoding='utf-8')
class BuildPerfTestCase(unittest.TestCase):
"""Base class for build performance tests"""
SYSRES = 'sysres'
DISKUSAGE = 'diskusage'
build_target = None
def __init__(self, *args, **kwargs):
super(BuildPerfTestCase, self).__init__(*args, **kwargs)
self.name = self._testMethodName
self.base_dir = None
self.start_time = None
self.elapsed_time = None
self.measurements = OrderedDict()
self.buildstats = OrderedDict()
# self.err is supposed to be a tuple from sys.exc_info()
self.err = None
self.bb_vars = get_bb_vars()
# TODO: remove 'times' and 'sizes' arrays when globalres support is
# removed
self.times = []
self.sizes = []
@property
def tmp_dir(self):
return os.path.join(self.base_dir, self.name + '.tmp')
def shortDescription(self):
return super(BuildPerfTestCase, self).shortDescription() or ""
def setUp(self):
"""Set-up fixture for each test"""
if not os.path.isdir(self.tmp_dir):
os.mkdir(self.tmp_dir)
if self.build_target:
self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
def tearDown(self):
"""Tear-down fixture for each test"""
if os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
def run(self, *args, **kwargs):
"""Run test"""
self.start_time = datetime.now()
super(BuildPerfTestCase, self).run(*args, **kwargs)
self.elapsed_time = datetime.now() - self.start_time
def run_cmd(self, cmd):
"""Convenience method for running a command"""
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.info("Logging command: %s", cmd_str)
try:
runCmd2(cmd)
except CommandError as err:
log.error("Command failed: %s", err.retcode)
raise
def _append_measurement(self, measurement):
"""Simple helper for adding measurements results"""
if measurement['name'] in self.measurements:
raise ValueError('BUG: two measurements with the same name in {}'.format(
self.__class__.__name__))
self.measurements[measurement['name']] = measurement
def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
"""Measure system resource usage of a command"""
def _worker(data_q, cmd, **kwargs):
"""Worker process for measuring resources"""
try:
start_time = datetime.now()
ret = runCmd2(cmd, **kwargs)
etime = datetime.now() - start_time
rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
iostat = OrderedDict()
with open('/proc/{}/io'.format(os.getpid())) as fobj:
for line in fobj.readlines():
key, val = line.split(':')
iostat[key] = int(val)
rusage = OrderedDict()
# Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
# 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
'ru_majflt', 'ru_inblock', 'ru_oublock',
'ru_nvcsw', 'ru_nivcsw']:
rusage[key] = getattr(rusage_struct, key)
data_q.put({'ret': ret,
'start_time': start_time,
'elapsed_time': etime,
'rusage': rusage,
'iostat': iostat})
except Exception as err:
data_q.put(err)
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.info("Timing command: %s", cmd_str)
data_q = SimpleQueue()
try:
proc = Process(target=_worker, args=(data_q, cmd,))
proc.start()
data = data_q.get()
proc.join()
if isinstance(data, Exception):
raise data
except CommandError:
log.error("Command '%s' failed", cmd_str)
raise
etime = data['elapsed_time']
measurement = OrderedDict([('type', self.SYSRES),
('name', name),
('legend', legend)])
measurement['values'] = OrderedDict([('start_time', data['start_time']),
('elapsed_time', etime),
('rusage', data['rusage']),
('iostat', data['iostat'])])
if save_bs:
self.save_buildstats(name)
self._append_measurement(measurement)
# Append to 'times' array for globalres log
e_sec = etime.total_seconds()
self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
int((e_sec % 3600) / 60),
e_sec % 60))
def measure_disk_usage(self, path, name, legend, apparent_size=False):
"""Estimate disk usage of a file or directory"""
cmd = ['du', '-s', '--block-size', '1024']
if apparent_size:
cmd.append('--apparent-size')
cmd.append(path)
ret = runCmd2(cmd)
size = int(ret.output.split()[0])
log.debug("Size of %s path is %s", path, size)
measurement = OrderedDict([('type', self.DISKUSAGE),
('name', name),
('legend', legend)])
measurement['values'] = OrderedDict([('size', size)])
self._append_measurement(measurement)
# Append to 'sizes' array for globalres log
self.sizes.append(str(size))
def save_buildstats(self, measurement_name):
"""Save buildstats"""
def split_nevr(nevr):
"""Split name and version information from recipe "nevr" string"""
n_e_v, revision = nevr.rsplit('-', 1)
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
n_e_v)
if not match:
# If we're not able to parse a version starting with a number, just
# take the part after last dash
match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
n_e_v)
name = match.group('name')
version = match.group('version')
epoch = match.group('epoch')
return name, epoch, version, revision
def bs_to_json(filename):
"""Convert (task) buildstats file into json format"""
bs_json = OrderedDict()
iostat = OrderedDict()
rusage = OrderedDict()
with open(filename) as fobj:
for line in fobj.readlines():
key, val = line.split(':', 1)
val = val.strip()
if key == 'Started':
start_time = datetime.utcfromtimestamp(float(val))
bs_json['start_time'] = start_time
elif key == 'Ended':
end_time = datetime.utcfromtimestamp(float(val))
elif key.startswith('IO '):
split = key.split()
iostat[split[1]] = int(val)
elif key.find('rusage') >= 0:
split = key.split()
ru_key = split[-1]
if ru_key in ('ru_stime', 'ru_utime'):
val = float(val)
else:
val = int(val)
rusage[ru_key] = rusage.get(ru_key, 0) + val
elif key == 'Status':
bs_json['status'] = val
bs_json['elapsed_time'] = end_time - start_time
bs_json['rusage'] = rusage
bs_json['iostat'] = iostat
return bs_json
log.info('Saving buildstats in JSON format')
bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
if len(bs_dirs) > 1:
log.warning("Multiple buildstats found for test %s, only "
"archiving the last one", self.name)
bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])
buildstats = []
for fname in os.listdir(bs_dir):
recipe_dir = os.path.join(bs_dir, fname)
if not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = split_nevr(fname)
recipe_bs = OrderedDict((('name', name),
('epoch', epoch),
('version', version),
('revision', revision),
('tasks', OrderedDict())))
for task in os.listdir(recipe_dir):
recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
task))
buildstats.append(recipe_bs)
self.buildstats[measurement_name] = buildstats
def rm_tmp(self):
"""Cleanup temporary/intermediate files and directories"""
log.debug("Removing temporary and cache files")
for name in ['bitbake.lock', 'conf/sanity_info',
self.bb_vars['TMPDIR']]:
oe.path.remove(name, recurse=True)
def rm_sstate(self):
"""Remove sstate directory"""
log.debug("Removing sstate-cache")
oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)
def rm_cache(self):
"""Drop bitbake caches"""
oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)
@staticmethod
def sync():
"""Sync and drop kernel caches"""
runCmd2('bitbake -m', ignore_status=True)
log.debug("Syncing and dropping kernel caches""")
KernelDropCaches.drop()
os.sync()
# Wait a bit for all the dirty blocks to be written onto disk
time.sleep(3)
class BuildPerfTestLoader(unittest.TestLoader):
"""Test loader for build performance tests"""
sortTestMethodsUsing = None
class BuildPerfTestRunner(unittest.TextTestRunner):
"""Test loader for build performance tests"""
sortTestMethodsUsing = None
def __init__(self, out_dir, *args, **kwargs):
super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
self.out_dir = out_dir
def _makeResult(self):
return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
self.verbosity)
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
import utils
import multiprocessing
from os.path import join, dirname, abspath, basename, isdir, exists
from datetime import datetime
from Queue import Queue, Empty
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases, flaky_tests_mode):
self.cases = cases
self.flaky_tests_mode = flaky_tests_mode
self.parallel_queue = Queue(len(cases))
self.sequential_queue = Queue(len(cases))
for case in cases:
if case.parallel:
self.parallel_queue.put_nowait(case)
else:
self.sequential_queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
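# Cases flagged as parallel are drawn from parallel_queue by every worker thread;
# sequential cases are only picked up by the main thread (see RunSingle).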
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[True, i + 1])
threads.append(thread)
thread.start()
try:
self.RunSingle(False, 0)
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self, parallel, thread_id):
while not self.terminate:
try:
test = self.parallel_queue.get_nowait()
except Empty:
if parallel:
return
try:
test = self.sequential_queue.get_nowait()
except Empty:
return
case = test.case
case.thread_id = thread_id
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = datetime.now()
output = case.Run()
case.duration = (datetime.now() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class TapProgressIndicator(SimpleProgressIndicator):
def Starting(self):
print '1..%i' % len(self.cases)
self._done = 0
def AboutToRun(self, case):
pass
def HasRun(self, output):
self._done += 1
command = basename(output.command[-1])
if output.UnexpectedOutput():
status_line = 'not ok %i - %s' % (self._done, command)
if FLAKY in output.test.outcomes and self.flaky_tests_mode == "dontcare":
status_line = status_line + " # TODO : Fix flaky test"
print status_line
for l in output.output.stderr.splitlines():
print '#' + l
for l in output.output.stdout.splitlines():
print '#' + l
else:
status_line = 'ok %i - %s' % (self._done, command)
if FLAKY in output.test.outcomes:
status_line = status_line + " # TODO : Fix flaky test"
print status_line
duration = output.test.duration
# total_seconds() was added in 2.7
total_seconds = (duration.microseconds +
(duration.seconds + duration.days * 24 * 3600) * 10**6) / 10**6
print ' ---'
print ' duration_ms: %d.%d' % (total_seconds, duration.microseconds / 1000)
print ' ...'
def Done(self):
pass
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, flaky_tests_mode, templates):
super(CompactProgressIndicator, self).__init__(cases, flaky_tests_mode)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases, flaky_tests_mode):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, flaky_tests_mode, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'tap': TapProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, arch, mode):
self.path = path
self.context = context
self.duration = None
self.arch = arch
self.mode = mode
self.parallel = False
self.thread_id = 0
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command, env):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode),
env)
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
try:
result = self.RunCommand(self.GetCommand(), {
"TEST_THREAD_ID": "%d" % self.thread_id
})
finally:
# Tests can leave the tty in non-blocking mode. If the test runner
# tries to print to stdout/stderr after that and the tty buffer is
# full, it'll die with an EAGAIN OSError. Ergo, put the tty back in
# blocking mode before proceeding.
if sys.platform != 'win32':
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK
for fd in 0,1,2: fcntl(fd, F_SETFL, ~O_NONBLOCK & fcntl(fd, F_GETFL))
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
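# Poll with exponential backoff: start at INITIAL_SLEEP_TIME and grow by
# SLEEP_TIME_FACTOR, capped at MAX_SLEEP_TIME, so short-lived processes are
# detected quickly without busy-waiting on long ones.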
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None, env={}):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
# Extend environment
env_copy = os.environ.copy()
for key, value in env.iteritems():
env_copy[key] = value
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
env = env_copy
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, arch, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path,
arch, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, arch, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, arch, mode)
result.sort(cmp=lambda a, b: cmp(a.GetName(), b.GetName()))
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, arch, mode):
if arch == 'none':
name = 'out/Debug/iojs' if mode == 'debug' else 'out/Release/iojs'
else:
name = 'out/%s.%s/iojs' % (arch, mode)
# Currently GYP does not support output_dir for MSVS.
# http://code.google.com/p/gyp/issues/detail?id=40
# It will put the builds into Release/iojs.exe or Debug/iojs.exe
if utils.IsWindows():
out_dir = os.path.join(dirname(__file__), "..", "out")
if not exists(out_dir):
if mode == 'debug':
name = os.path.abspath('Debug/iojs.exe')
else:
name = os.path.abspath('Release/iojs.exe')
else:
name = os.path.abspath(name + '.exe')
return name
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks, flaky_tests_mode):
progress = PROGRESS_INDICATORS[progress](cases_to_run, flaky_tests_mode)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
FLAKY = 'flaky'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
    return self.index + length <= self.length
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
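# Example (not part of the original file): how a status-file condition string
# is tokenized; the expression below is hypothetical.
#
#   Tokenizer("$mode == debug && (pass || fail)").Tokenize()
#   # -> ['$', 'mode', '==', 'debug', '&&', '(', 'pass', '||', 'fail', ')']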
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
    left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
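# Example (not part of the original file): how parsed expressions are used;
# the expressions and environments below are hypothetical.
#
#   section = ParseCondition("$mode == debug && $system == linux")
#   section.Evaluate({'mode': 'debug', 'system': 'linux'}, {})    # -> True
#
#   rule = ParseCondition("PASS || FAIL if $mode == debug")
#   rule.GetOutcomes({'mode': 'release'}, {}).elms                # -> ['pass']
#   rule.GetOutcomes({'mode': 'debug'}, {}).elms                  # -> ['pass', 'fail']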
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
self.parallel = self.case.parallel
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
    outcomes = self.value.GetOutcomes(env, defs)
    assert isinstance(outcomes, ListSet)
    return outcomes.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
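# Example (not part of the original file): the kind of .status file the parser
# above accepts; the test names, defs and outcomes below are hypothetical.
#
#   prefix message
#   def FAIL_OK = FAIL, OKAY
#
#   [ $mode == debug ]
#   some-test: PASS || TIMEOUT
#   another-test: FAIL_OK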
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono, tap)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--flaky-tests",
help="Regard tests marked as flaky (run|skip|dontcare)",
default="run")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("-J", help="Run tasks in parallel on all cores",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.arch = options.arch.split(',')
options.mode = options.mode.split(',')
if options.J:
options.j = multiprocessing.cpu_count()
def CheckTestMode(name, option):
    if option not in ["run", "skip", "dontcare"]:
print "Unknown %s mode %s" % (name, option)
return False
return True
if not CheckTestMode("--flaky-tests", options.flaky_tests):
return False
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
  def IsFlaky(o):
    return (PASS in o) and (FAIL in o) and (CRASH not in o) and (OKAY not in o)
  def IsFailOk(o):
    return (len(o) == 2) and (FAIL in o) and (OKAY in o)
  unskipped = [c for c in cases if SKIP not in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
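# Example (not part of the original file): how test paths are matched; the
# path below is hypothetical.
#
#   SplitPath("parallel/test-fs-*")
#   # -> one Pattern per path segment; '*' is turned into a '.*' wildcard when
#   #    Pattern.match() lazily compiles the regular expression.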
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
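# Example (not part of the original file): how --special-command wraps each
# test invocation; the command and arguments below are hypothetical.
#
#   expand = GetSpecialCommandProcessor("python -u run-valgrind.py @")
#   expand(["out/Release/iojs", "test.js"])
#   # -> ['python', '-u', 'run-valgrind.py', 'out/Release/iojs', 'test.js']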
BUILT_IN_TESTS = [
'sequential',
'parallel',
'pummel',
'message',
'internet',
'gc',
'debugger',
]
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
processor = GetSpecialCommandProcessor(options.special_command)
context = Context(workspace,
buildspace,
VERBOSE,
shell,
options.timeout,
processor,
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for arch in options.arch:
for mode in options.mode:
vm = context.GetVm(arch, mode)
if not exists(vm):
print "Can't find shell executable: '%s'" % vm
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': arch,
}
test_list = root.ListTests([], path, context, arch, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = (
config.ClassifyTests(test_list, env))
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = (
globally_unused_rules.intersection(unused_rules))
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes or (FLAKY in case.outcomes and options.flaky_tests == "skip")
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 1
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j, options.flaky_tests):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
      timed_tests = [ t.case for t in cases_to_run if t.case.duration is not None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
ar_207_测试_批量添加线程.py
|
from time import ctime,sleep
import threading
def super_player(func,time):
for i in range(3):
        print('Now playing %s %s' % (func, ctime()))
sleep(time)
file_dict={'断点.mp3':3,'蜡笔小新.mp4':2,'你还要我怎么样.mp3':4}
threads=[]
times=range(len(file_dict))
# Pull the file name (key) and play time (value) out of the dictionary and pass them to each thread as arguments.
for file,time in file_dict.items():
t=threading.Thread(target=super_player,args=(file,time))
threads.append(t)
if __name__ == '__main__':
    # The function defined above is started through the threads, so there is no need to call it directly.
for time in times:
threads[time].start()
for time in times:
threads[time].join()
    print('All done: %s' % ctime())
|
test_environments.py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
from threading import Thread
import unittest
from tensorforce import Environment, Runner
from test.unittest_base import UnittestBase
class TestEnvironments(UnittestBase, unittest.TestCase):
num_episodes = 2
@pytest.mark.skip(reason='problems with processes/sockets in travis')
def test_remote_environments(self):
self.start_tests(name='remote-environments')
agent = self.agent_spec(
require_observe=True, update=dict(unit='episodes', batch_size=1),
parallel_interactions=2
)
environment = self.environment_spec()
runner = Runner(
agent=agent, environment=environment, num_parallel=2, remote='multiprocessing'
)
runner.run(num_episodes=self.__class__.num_episodes, use_tqdm=False)
runner.close()
self.finished_test()
def server(port):
Environment.create(environment=environment, remote='socket-server', port=port)
server1 = Thread(target=server, kwargs=dict(port=65432))
server2 = Thread(target=server, kwargs=dict(port=65433))
server1.start()
server2.start()
runner = Runner(
agent=agent, num_parallel=2, remote='socket-client', host='127.0.0.1', port=65432
)
runner.run(num_episodes=self.__class__.num_episodes, use_tqdm=False)
runner.close()
server1.join()
server2.join()
self.finished_test()
# @pytest.mark.skip(reason='not installed as part of travis')
# def test_ale(self):
# self.start_tests(name='ale')
# self.unittest(
# environment=dict(environment='ale', level='test/data/Breakout.bin'), num_episodes=2
# )
# @pytest.mark.skip(reason='not installed as part of travis')
# def test_maze_explorer(self):
# self.start_tests(name='maze-explorer')
# self.unittest(environment=dict(environment='mazeexp', level=0))
# @pytest.mark.skip(reason='not installed as part of travis')
# def test_open_sim(self):
# self.start_tests(name='open-sim')
# self.unittest(environment=dict(environment='osim', level='Arm2D'))
def test_openai_gym(self):
self.start_tests(name='openai-gym')
# state: box, action: discrete
self.unittest(environment=dict(environment='gym', level='CartPole-v0'))
# state: discrete, action: box
self.unittest(
environment=dict(environment='gym', level='GuessingGame', max_episode_steps=False)
)
# state: discrete, action: tuple(discrete)
from gym.envs.algorithmic import ReverseEnv
self.unittest(environment=ReverseEnv)
# state: tuple, action: discrete
from gym.envs.toy_text import BlackjackEnv
self.unittest(environment=BlackjackEnv())
def test_openai_retro(self):
self.start_tests(name='openai-retro')
self.unittest(environment=dict(environment='retro', level='Airstriker-Genesis'))
@pytest.mark.skip(reason='not installed as part of travis')
def test_ple(self):
self.start_tests(name='pygame-learning-environment')
self.unittest(environment=dict(environment='ple', level='Pong'))
@pytest.mark.skip(reason='not installed as part of travis')
def test_vizdoom(self):
self.start_tests(name='vizdoom')
self.unittest(
environment=dict(environment='vizdoom', level='test/data/basic.cfg'), memory=1000
)
|
runInIndesign.py
|
import sublime, sublime_plugin
import os
import threading
import socketserver
import subprocess
import sys
import re
import tempfile
PATH = os.path.dirname(os.path.realpath(__file__))
HOST, PORT = "localhost", 0
class RunInIndesignCommand(sublime_plugin.TextCommand):
def run(self,edit, command):
        if command == 'run':
self.runC(self)
else:
self.selectTarget()
def selectTarget(self):
sett=sublime.load_settings('RunInIndesign.sublime-settings')
available=sett.get('available')
print(available)
self.view.window().show_quick_panel([it['name'] for it in available],self.targetSel)
def targetSel(self,index):
sett=sublime.load_settings('RunInIndesign.sublime-settings')
available=[it['identifier'] for it in sett.get('available')][index]
sett.set('target',available)
sublime.save_settings('RunInIndesign.sublime-settings')
def runC(self, edit):
self.window=self.view.window()
self.output_view = Cons(self.window)
self.clear(self.view)
myF=self.getFile()
sublime.status_message("Running "+os.path.basename(myF)+ " with Indesign")
        self.output_view.showConsole()
iR=IndRunner(myF,self.output_view,self.processOtuput)
sett=sublime.load_settings('RunInIndesign.sublime-settings')
currentTarget=sett.get('target')
self.output_view.addText("Running "+os.path.basename(myF)+ " with Indesign "+currentTarget+"\n")
iR.scanAndFixTargetEngine()
if sys.platform == "darwin":
iR.runOsx(currentTarget)
else:
iR.runWin('.'+currentTarget if currentTarget else '""')
def getFile(self):
f=self.view.file_name()
if f==None or self.view.is_dirty():
self.window.run_command('save')
f=self.view.file_name()
return f
def saveCurrentViewInTempFile(self, view):
#
# Create a temporary file to hold our contents
#
tempFile = tempfile.NamedTemporaryFile(suffix = ".jsx", delete = False)
#
# Get the contents of the current view
#
region = sublime.Region(0, view.size())
text = view.substr(region)
tempFile.write(text.encode("utf-8"))
tempFile.close()
return tempFile.name
def markLine(self,view, line_number):
self.clear(view)
print(line_number)
region = view.text_point(line_number-1, 0)
line = view.line(region)
view.add_regions(
'jsx_error',
[line],
'keyword',
'dot',
sublime.DRAW_NO_FILL
)
def clear(self,view):
view.erase_regions('jsx_error')
def processOtuput(self):
log=self.output_view.content
print(log)
        isInError = re.search(r'\[Exited with error\]', log)
if isInError:
try:
err=re.search('\s+Error:(.+)',log).group(1)
f=re.search('\s+File:(.+)',log).group(1)
line=re.search('\s+Line:(\d+)',log).group(1)
sublime.status_message(err)
f=f.replace('\\','/')
v=self.window.find_open_file(f)
if v==None:
v=self.window.open_file(f)
while v.is_loading():
pass
try:
l=int(line)
except ValueError:
l=0
self.markLine(v,l)
self.window.focus_view(v)
except Exception as e:
self.output_view.addText('\nCannot get errors: '+str(e))
else:
sublime.status_message("No errors")
class Cons(object):
def __init__(self,window):
self.content=''
self.output_view = window.create_output_panel("console")
self.output_view.assign_syntax('Packages/Text/Plain text.tmLanguage')
self.output_view.settings().set("word_wrap", True)
self.output_view.settings().set("line_numbers", False)
self.output_view.settings().set("gutter", False)
self.output_view.settings().set("scroll_past_end", False)
self.window=window
def addText(self,txt):
str = txt.replace('\r\n', '\n').replace('\r', '\n')
self.content=self.content+str
self.output_view.run_command('append', {'characters': str, 'force': True, 'scroll_to_end': True})
def showConsole(self):
self.window.run_command("show_panel", {"panel": "output.console"})
class LogServer (socketserver.TCPServer):
def __init__(self, server_address, RequestHandlerClass, cons, onExit, bind_and_activate=True):
self.console=cons
self.onExit=onExit
socketserver.TCPServer.__init__(self,server_address,RequestHandlerClass)
return
class LogRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
msg=''
while True:
data = str(self.request.recv(1024),'utf-8')
if not data: break
msg=msg+data
#print(msg)
if msg=="<ServerClose/>":
self.server.onExit()
self.server.shutdown()
else:
self.server.console.showConsole()
self.server.console.addText(msg)
# Encapsulates subprocess.Popen, listens for exit
class AsyncProcess(object):
def __init__(self, shell_cmd):
if not shell_cmd:
raise ValueError("shell_cmd is required")
if shell_cmd and not isinstance(shell_cmd, str):
raise ValueError("shell_cmd must be a string")
self.killed = False
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO()
print (shell_cmd)
if sys.platform == "win32":
# Use shell=True on Windows, so shell_cmd is passed through with the correct escaping
self.proc = subprocess.Popen(shell_cmd, startupinfo=startupinfo, shell=True)
elif shell_cmd and sys.platform == "darwin":
# Use a login shell on OSX, otherwise the users expected env vars won't be setup
self.proc = subprocess.Popen(["/bin/bash", "-l", "-c", shell_cmd], startupinfo=startupinfo, shell=False)
self.proc.wait()
def kill(self):
if not self.killed:
self.killed = True
if sys.platform == "win32":
# terminate would not kill process opened by the shell cmd.exe, it will only kill
# cmd.exe leaving the child running
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.Popen("taskkill /PID " + str(self.proc.pid), startupinfo=startupinfo)
else:
self.proc.terminate()
def poll(self):
        return self.proc.poll() is None
def exit_code(self):
return self.proc.poll()
class IndRunner(object):
"""docstring for IndRunner"""
def __init__(self,fileToRun,cons,finis):
self.finis=finis
self.winRun=os.path.join(PATH,'utils','runJs.vbs')
self.osxRun=os.path.join(PATH,'utils','runJS.scpt')
self.jsxRun=os.path.join(PATH,'utils','jsRunner.jsx')
self.server = LogServer((HOST, PORT),LogRequestHandler,cons,finis)
self.runFile = fileToRun
ip, self.port = self.server.server_address
self.server_thread = threading.Thread(target=self.server.serve_forever, name='Server')
def scanAndFixTargetEngine(self):
##reset the jsx to be sure
f=open(self.jsxRun,'r')
txt=f.read()
f.close()
txt=re.sub('#targetengine.+?[\n\r]+','',txt,1,re.M)
f=open(self.jsxRun,'w')
f.write(txt)
f.close()
f = open(self.runFile,'r',encoding="utf-8")
txt=f.read()
incl=re.search('#targetengine.+?$',txt,re.M)
f.close()
if incl:
f=open(self.jsxRun,'r')
txt=f.read()
f.close()
txt=incl.group(0)+'\n'+txt
f=open(self.jsxRun,'w')
f.write(txt)
f.close()
def runOsx(self,specif):
try:
print('specif:'+specif)
cmd='osascript "{}" "{}" {:d} {}'.format(self.osxRun,self.runFile,self.port,specif)
print (cmd)
self.server_thread.start()
self.proc = threading.Thread(target=AsyncProcess, args=(cmd,))
print('Server started')
self.proc.start()
except:
self.server.shutdown()
def runWin(self, specif):
try:
cmd='cscript "{}" "{}" {:d} {}'.format(self.winRun,self.runFile,self.port,specif)
print(cmd)
self.server_thread.start()
self.proc = threading.Thread(target=AsyncProcess, args=(cmd,))
print('Server started')
self.proc.start()
except:
self.server.shutdown()
def finishRun(self):
self.finis()
print('End Server')
# if __name__ == '__main__':
# iR=IndRunner(os.path.join(PATH,'utils','test.jsx'),cons)
# iR.runWin('CC')
|
burst.py
|
# -*- coding: utf-8 -*-
"""
Burst processing thread
"""
import re
import json
import time
import xbmc
import xbmcgui
from Queue import Queue
from threading import Thread
from urlparse import urlparse
from urllib import unquote
from elementum.provider import append_headers, get_setting, set_setting, log
from parser.ehp import Html
from provider import process
from providers.definitions import definitions, longest
from filtering import apply_filters, Filtering
from client import USER_AGENT, Client, get_cloudhole_key, get_cloudhole_clearance
from utils import ADDON_ICON, notify, translation, sizeof, get_icon_path, get_enabled_providers, get_alias
provider_names = []
provider_results = []
available_providers = 0
request_time = time.time()
timeout = get_setting("timeout", int)
special_chars = "()\"':.[]<>/\\?"
def search(payload, method="general"):
""" Main search entrypoint
Args:
payload (dict): Search payload from Elementum.
method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
Returns:
list: All filtered results in the format Elementum expects
"""
log.debug("Searching with payload (%s): %s" % (method, repr(payload)))
if method == 'general':
if 'query' in payload:
payload['title'] = payload['query']
payload['titles'] = {
'source': payload['query']
}
else:
payload = {
'title': payload,
'titles': {
'source': payload
},
}
payload['titles'] = dict((k.lower(), v) for k, v in payload['titles'].iteritems())
# If titles[] exists in payload and there are special chars in titles[source]
# then we set a flag to possibly modify the search query
payload['has_special'] = 'titles' in payload and \
bool(payload['titles']) and \
'source' in payload['titles'] and \
any(c in payload['titles']['source'] for c in special_chars)
if payload['has_special']:
log.debug("Query title contains special chars, so removing any quotes in the search query")
global request_time
global provider_names
global provider_results
global available_providers
provider_names = []
provider_results = []
available_providers = 0
request_time = time.time()
providers = get_enabled_providers(method)
if len(providers) == 0:
notify(translation(32060), image=get_icon_path())
log.error("No providers enabled")
return []
log.info("Burstin' with %s" % ", ".join([definitions[provider]['name'] for provider in providers]))
if get_setting("use_cloudhole", bool):
clearance, user_agent = get_cloudhole_clearance(get_cloudhole_key())
set_setting('clearance', clearance)
set_setting('user_agent', user_agent)
if get_setting('kodi_language', bool):
kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
if not kodi_language:
log.warning("Kodi returned empty language code...")
elif 'titles' not in payload or not payload['titles']:
log.info("No translations available...")
elif payload['titles'] and kodi_language not in payload['titles']:
log.info("No '%s' translation available..." % kodi_language)
p_dialog = xbmcgui.DialogProgressBG()
p_dialog.create('Elementum [COLOR FFFF6B00]Burst[/COLOR]', translation(32061))
for provider in providers:
available_providers += 1
provider_names.append(definitions[provider]['name'])
task = Thread(target=run_provider, args=(provider, payload, method))
task.start()
providers_time = time.time()
total = float(available_providers)
# Exit if all providers have returned results or timeout reached, check every 100ms
while time.time() - providers_time < timeout and available_providers > 0:
timer = time.time() - providers_time
log.debug("Timer: %ds / %ds" % (timer, timeout))
if timer > timeout:
break
message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
p_dialog.update(int((total - available_providers) / total * 100), message=message)
time.sleep(0.25)
p_dialog.close()
del p_dialog
if available_providers > 0:
message = u', '.join(provider_names)
message = message + translation(32064)
log.warning(message.encode('utf-8'))
notify(message, ADDON_ICON)
log.debug("all provider_results: %s" % repr(provider_results))
filtered_results = apply_filters(provider_results)
log.debug("all filtered_results: %s" % repr(filtered_results))
log.info("Providers returned %d results in %s seconds" % (len(filtered_results), round(time.time() - request_time, 2)))
return filtered_results
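# Example (not part of the original file): a minimal call as Elementum would
# issue it; the payload below is hypothetical.
#
#   results = search({'query': 'some movie title'}, method='general')
#   # -> list of filtered torrent results ready to hand back to Elementum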
def got_results(provider, results):
""" Results callback once a provider found all its results, or not
Args:
provider (str): The provider ID
results (list): The list of results
"""
global provider_names
global provider_results
global available_providers
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
max_results = get_setting('max_results', int)
sorted_results = sorted(results, key=lambda r: (r['seeds']), reverse=True)
if len(sorted_results) > max_results:
sorted_results = sorted_results[:max_results]
log.info(">> %s returned %2d results in %.1f seconds%s" % (
definition['name'].rjust(longest), len(results), round(time.time() - request_time, 2),
(", sending %d best ones" % max_results) if len(results) > max_results else ""))
provider_results.extend(sorted_results)
available_providers -= 1
if definition['name'] in provider_names:
provider_names.remove(definition['name'])
def extract_torrents(provider, client):
""" Main torrent extraction generator for non-API based providers
Args:
provider (str): Provider ID
client (Client): Client class instance
Yields:
tuple: A torrent result
"""
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
log.debug("Extracting torrents from %s using definitions: %s" % (provider, repr(definition)))
if not client.content:
        return  # end the generator; equivalent to StopIteration but safe under PEP 479
dom = Html().feed(client.content)
key_search = get_search_query(definition, "key")
row_search = get_search_query(definition, "row")
name_search = get_search_query(definition, "name")
torrent_search = get_search_query(definition, "torrent")
info_hash_search = get_search_query(definition, "infohash")
size_search = get_search_query(definition, "size")
seeds_search = get_search_query(definition, "seeds")
peers_search = get_search_query(definition, "peers")
referer_search = get_search_query(definition, "referer")
log.debug("[%s] Parser: %s" % (provider, repr(definition['parser'])))
q = Queue()
threads = []
needs_subpage = 'subpage' in definition and definition['subpage']
if needs_subpage:
def extract_subpage(q, name, torrent, size, seeds, peers, info_hash, referer):
try:
log.debug("[%s] Getting subpage at %s" % (provider, repr(torrent)))
except Exception as e:
import traceback
log.error("[%s] Subpage logging failed with: %s" % (provider, repr(e)))
map(log.debug, traceback.format_exc().split("\n"))
# New client instance, otherwise it's race conditions all over the place
subclient = Client()
subclient.passkey = client.passkey
headers = {}
if get_setting("use_cloudhole", bool):
subclient.clearance = get_setting('clearance')
subclient.user_agent = get_setting('user_agent')
if "subpage_mode" in definition:
if definition["subpage_mode"] == "xhr":
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['Content-Language'] = ''
if referer:
headers['Referer'] = referer
uri = torrent.split('|') # Split cookies for private trackers
subclient.open(uri[0].encode('utf-8'), headers=headers)
if 'bittorrent' in subclient.headers.get('content-type', ''):
log.debug('[%s] bittorrent content-type for %s' % (provider, repr(torrent)))
if len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
else:
try:
torrent = extract_from_page(provider, subclient.content)
if torrent and not torrent.startswith('magnet') and len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
except Exception as e:
import traceback
log.error("[%s] Subpage extraction for %s failed with: %s" % (provider, repr(uri[0]), repr(e)))
map(log.debug, traceback.format_exc().split("\n"))
ret = (name, info_hash, torrent, size, seeds, peers)
q.put_nowait(ret)
if not dom:
        return  # end the generator; equivalent to StopIteration but safe under PEP 479
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Page content: %s" % (provider, client.content.replace('\r', '').replace('\n', '')))
key = eval(key_search) if key_search else ""
if key_search and get_setting("use_debug_parser", bool):
key_str = key.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'key', key_search, key_str.replace('\r', '').replace('\n', '')))
items = eval(row_search)
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" % (provider, len(items), 'row', row_search))
for item in items:
if get_setting("use_debug_parser", bool):
item_str = item.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'row', row_search, item_str.replace('\r', '').replace('\n', '')))
if not item:
continue
name = eval(name_search) if name_search else ""
torrent = eval(torrent_search) if torrent_search else ""
size = eval(size_search) if size_search else ""
seeds = eval(seeds_search) if seeds_search else ""
peers = eval(peers_search) if peers_search else ""
info_hash = eval(info_hash_search) if info_hash_search else ""
referer = eval(referer_search) if referer_search else ""
if 'magnet:?' in torrent:
torrent = torrent[torrent.find('magnet:?'):]
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'name', name_search, name))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'torrent', torrent_search, torrent))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'size', size_search, size))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'seeds', seeds_search, seeds))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'peers', peers_search, peers))
if info_hash_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', info_hash_search, info_hash))
if referer_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', referer_search, referer))
# Pass client cookies with torrent if private
if (definition['private'] or get_setting("use_cloudhole", bool)) and not torrent.startswith('magnet'):
user_agent = USER_AGENT
if get_setting("use_cloudhole", bool):
user_agent = get_setting("user_agent")
if client.passkey:
torrent = torrent.replace('PASSKEY', client.passkey)
elif client.token:
headers = {'Authorization': client.token, 'User-Agent': user_agent}
log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
else:
log.debug("[%s] Cookies: %s" % (provider, repr(client.cookies())))
parsed_url = urlparse(definition['root_url'])
cookie_domain = '{uri.netloc}'.format(uri=parsed_url).replace('www.', '')
cookies = []
# log.debug("[%s] cookie_domain: %s" % (provider, cookie_domain))
for cookie in client._cookies:
# log.debug("[%s] cookie for domain: %s (%s=%s)" % (provider, cookie.domain, cookie.name, cookie.value))
if cookie_domain in cookie.domain:
cookies.append(cookie)
if cookies:
headers = {'Cookie': ";".join(["%s=%s" % (c.name, c.value) for c in cookies]), 'User-Agent': user_agent}
# log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
# log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
if name and torrent and needs_subpage and not torrent.startswith('magnet'):
if not torrent.startswith('http'):
torrent = definition['root_url'] + torrent.encode('utf-8')
t = Thread(target=extract_subpage, args=(q, name, torrent, size, seeds, peers, info_hash, referer))
threads.append(t)
else:
yield (name, info_hash, torrent, size, seeds, peers)
if needs_subpage:
log.debug("[%s] Starting subpage threads..." % provider)
for t in threads:
t.start()
for t in threads:
t.join()
log.debug("[%s] Threads returned: %s" % (provider, repr(threads)))
for i in range(q.qsize()):
ret = q.get_nowait()
log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
yield ret
def extract_from_api(provider, client):
""" Main API parsing generator for API-based providers
An almost clever API parser, mostly just for YTS, RARBG and T411
Args:
provider (str): Provider ID
client (Client): Client class instance
Yields:
tuple: A torrent result
"""
try:
data = json.loads(client.content)
except:
data = []
log.debug("[%s] JSON response from API: %s" % (unquote(provider), repr(data)))
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
api_format = definition['api_format']
results = []
result_keys = api_format['results'].split('.')
log.debug("%s result_keys: %s" % (provider, repr(result_keys)))
for key in result_keys:
if key in data:
data = data[key]
else:
data = []
# log.debug("%s nested results: %s" % (provider, repr(data)))
results = data
log.debug("%s results: %s" % (provider, repr(results)))
if 'subresults' in api_format:
from copy import deepcopy
for result in results: # A little too specific to YTS but who cares...
result['name'] = result[api_format['name']]
subresults = []
subresults_keys = api_format['subresults'].split('.')
for key in subresults_keys:
for result in results:
if key in result:
for subresult in result[key]:
sub = deepcopy(result)
sub.update(subresult)
subresults.append(sub)
results = subresults
log.debug("%s with subresults: %s" % (provider, repr(results)))
for result in results:
if not result or not isinstance(result, dict):
continue
name = ''
info_hash = ''
torrent = ''
size = ''
seeds = ''
peers = ''
if 'name' in api_format:
name = result[api_format['name']]
if 'torrent' in api_format:
torrent = result[api_format['torrent']]
if 'download_path' in definition:
torrent = definition['base_url'] + definition['download_path'] + torrent
if client.token:
user_agent = USER_AGENT
if get_setting("use_cloudhole", bool):
user_agent = get_setting("user_agent")
headers = {'Authorization': client.token, 'User-Agent': user_agent}
log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
if 'info_hash' in api_format:
info_hash = result[api_format['info_hash']]
if 'quality' in api_format: # Again quite specific to YTS...
name = "%s - %s" % (name, result[api_format['quality']])
if 'size' in api_format:
size = result[api_format['size']]
if type(size) in (long, int):
size = sizeof(size)
elif type(size) in (str, unicode) and size.isdigit():
size = sizeof(int(size))
if 'seeds' in api_format:
seeds = result[api_format['seeds']]
if type(seeds) in (str, unicode) and seeds.isdigit():
seeds = int(seeds)
if 'peers' in api_format:
peers = result[api_format['peers']]
if type(peers) in (str, unicode) and peers.isdigit():
peers = int(peers)
yield (name, info_hash, torrent, size, seeds, peers)
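# Example (not part of the original file): the shape of an 'api_format' mapping
# this generator understands; the right-hand field names are hypothetical.
#
#   'api_format': {
#       'results': 'data.movies',
#       'name': 'title',
#       'torrent': 'url',
#       'size': 'size_bytes',
#       'seeds': 'seeders',
#       'peers': 'leechers',
#   }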
def extract_from_page(provider, content):
""" Sub-page extraction method
Args:
provider (str): Provider ID
content (str): Page content from Client instance
Returns:
str: Torrent or magnet link extracted from sub-page
"""
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
try:
matches = re.findall(r'magnet:\?[^\'"\s<>\[\]]+', content)
if matches:
result = matches[0]
log.debug('[%s] Matched magnet link: %s' % (provider, repr(result)))
return result
matches = re.findall('http(.*?).torrent["\']', content)
if matches:
result = 'http' + matches[0] + '.torrent'
result = result.replace('torcache.net', 'itorrents.org')
log.debug('[%s] Matched torrent link: %s' % (provider, repr(result)))
return result
matches = re.findall('/download\?token=[A-Za-z0-9%]+', content)
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link with token: %s' % (provider, repr(result)))
return result
matches = re.findall('/torrents/download/\?id=[a-z0-9-_.]+', content) # t411
if matches:
result = definition['root_url'] + matches[0]
log.debug('[%s] Matched download link with an ID: %s' % (provider, repr(result)))
return result
except:
pass
return None
def run_provider(provider, payload, method):
""" Provider thread entrypoint
Args:
provider (str): Provider ID
payload (dict): Search payload from Elementum
method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
"""
log.debug("Processing %s with %s method" % (provider, method))
filterInstance = Filtering()
if method == 'movie':
filterInstance.use_movie(provider, payload)
elif method == 'season':
filterInstance.use_season(provider, payload)
elif method == 'episode':
filterInstance.use_episode(provider, payload)
elif method == 'anime':
filterInstance.use_anime(provider, payload)
else:
filterInstance.use_general(provider, payload)
if 'is_api' in definitions[provider]:
results = process(provider=provider, generator=extract_from_api, filtering=filterInstance, has_special=payload['has_special'])
else:
results = process(provider=provider, generator=extract_torrents, filtering=filterInstance, has_special=payload['has_special'])
got_results(provider, results)
def get_search_query(definition, key):
if 'parser' not in definition or key not in definition['parser']:
return ""
if key == 'key' or key == 'table' or key == 'row':
return "dom." + definition['parser'][key]
return definition['parser'][key]
|
http_server.py
|
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
# Many tests expect there to be an http server on port 4545 serving the deno
# root directory.
from collections import namedtuple
from contextlib import contextmanager
import os
import SimpleHTTPServer
import SocketServer
import socket
import sys
from time import sleep
from threading import Thread
from util import root_path
import ssl
import getopt
import argparse
PORT = 4545
REDIRECT_PORT = 4546
ANOTHER_REDIRECT_PORT = 4547
DOUBLE_REDIRECTS_PORT = 4548
INF_REDIRECTS_PORT = 4549
REDIRECT_ABSOLUTE_PORT = 4550
HTTPS_PORT = 5545
def create_http_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
return parser
HttpArgParser = create_http_arg_parser()
args, unknown = HttpArgParser.parse_known_args(sys.argv[1:])
CERT_FILE = os.path.join(root_path, "std/http/testdata/tls/localhost.crt")
KEY_FILE = os.path.join(root_path, "std/http/testdata/tls/localhost.key")
QUIET = not args.verbose
class SSLTCPServer(SocketServer.TCPServer):
def __init__(self,
server_address,
request_handler,
certfile,
keyfile,
ssl_version=ssl.PROTOCOL_TLSv1_2,
bind_and_activate=True):
SocketServer.TCPServer.__init__(self, server_address, request_handler,
bind_and_activate)
self.certfile = certfile
self.keyfile = keyfile
self.ssl_version = ssl_version
def get_request(self):
newsocket, fromaddr = self.socket.accept()
connstream = ssl.wrap_socket(
newsocket,
server_side=True,
certfile=self.certfile,
keyfile=self.keyfile,
ssl_version=self.ssl_version)
return connstream, fromaddr
class SSLThreadingTCPServer(SocketServer.ThreadingMixIn, SSLTCPServer):
pass
class QuietSimpleHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def log_request(self, code='-', size='-'):
if not QUIET:
SimpleHTTPServer.SimpleHTTPRequestHandler.log_request(
self, code, size)
class ContentTypeHandler(QuietSimpleHTTPRequestHandler):
def do_GET(self):
# Check if there is a custom header configuration ending
# with ".header" before sending the file
maybe_header_file_path = "./" + self.path + ".header"
if os.path.exists(maybe_header_file_path):
self.protocol_version = 'HTTP/1.1'
self.send_response(200, 'OK')
f = open(maybe_header_file_path)
for line in f:
kv = line.split(": ")
self.send_header(kv[0].strip(), kv[1].strip())
f.close()
self.end_headers()
body = open("./" + self.path)
self.wfile.write(body.read())
body.close()
return
if "etag_script.ts" in self.path:
self.protocol_version = 'HTTP/1.1'
if_not_match = self.headers.getheader('if-none-match')
if if_not_match == "33a64df551425fcc55e":
self.send_response(304, 'Not Modified')
self.send_header('Content-type', 'application/typescript')
self.send_header('ETag', '33a64df551425fcc55e')
self.end_headers()
else:
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/typescript')
self.send_header('ETag', '33a64df551425fcc55e')
self.end_headers()
self.wfile.write(bytes("console.log('etag')"))
return
if "xTypeScriptTypes.js" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/javascript')
self.send_header('X-TypeScript-Types', './xTypeScriptTypes.d.ts')
self.end_headers()
self.wfile.write(bytes("export const foo = 'foo';"))
return
if "type_directives_redirect.js" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/javascript')
self.send_header(
'X-TypeScript-Types',
'http://localhost:4547/xTypeScriptTypesRedirect.d.ts')
self.end_headers()
self.wfile.write(bytes("export const foo = 'foo';"))
return
if "xTypeScriptTypesRedirect.d.ts" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/typescript')
self.end_headers()
self.wfile.write(
bytes("import './xTypeScriptTypesRedirected.d.ts';"))
return
if "xTypeScriptTypesRedirected.d.ts" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/typescript')
self.end_headers()
self.wfile.write(bytes("export const foo: 'foo';"))
return
if "xTypeScriptTypes.d.ts" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/typescript')
self.end_headers()
self.wfile.write(bytes("export const foo: 'foo';"))
return
if "referenceTypes.js" in self.path:
self.protocol_version = "HTTP/1.1"
self.send_response(200, 'OK')
self.send_header('Content-type', 'application/javascript')
self.end_headers()
self.wfile.write(
bytes('/// <reference types="./xTypeScriptTypes.d.ts" />\r\n'
'export const foo = "foo";\r\n'))
return
if "multipart_form_data.txt" in self.path:
self.protocol_version = 'HTTP/1.1'
self.send_response(200, 'OK')
self.send_header('Content-type',
'multipart/form-data;boundary=boundary')
self.end_headers()
self.wfile.write(
bytes('Preamble\r\n'
'--boundary\t \r\n'
'Content-Disposition: form-data; name="field_1"\r\n'
'\r\n'
'value_1 \r\n'
'\r\n--boundary\r\n'
'Content-Disposition: form-data; name="field_2"; '
'filename="file.js"\r\n'
'Content-Type: text/javascript\r\n'
'\r\n'
'console.log("Hi")'
'\r\n--boundary--\r\n'
'Epilogue'))
return
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
# Simple echo server for request reflection
if "echo_server" in self.path:
self.protocol_version = 'HTTP/1.1'
self.send_response(200, 'OK')
if self.headers.has_key('content-type'):
self.send_header('content-type',
self.headers.getheader('content-type'))
if self.headers.has_key('user-agent'):
self.send_header('user-agent',
self.headers.getheader('user-agent'))
self.end_headers()
data_string = self.rfile.read(int(self.headers['Content-Length']))
self.wfile.write(bytes(data_string))
return
self.protocol_version = 'HTTP/1.1'
self.send_response(501)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('Server does not support this operation'))
def guess_type(self, path):
if ".t1." in path:
return "text/typescript"
if ".t2." in path:
return "video/vnd.dlna.mpeg-tts"
if ".t3." in path:
return "video/mp2t"
if ".t4." in path:
return "application/x-typescript"
if ".j1." in path:
return "text/javascript"
if ".j2." in path:
return "application/ecmascript"
if ".j3." in path:
return "text/ecmascript"
if ".j4." in path:
return "application/x-javascript"
if "form_urlencoded" in path:
return "application/x-www-form-urlencoded"
if "no_ext" in path:
return "text/typescript"
if "unknown_ext" in path:
return "text/typescript"
if "mismatch_ext" in path:
return "text/javascript"
return SimpleHTTPServer.SimpleHTTPRequestHandler.guess_type(self, path)
RunningServer = namedtuple("RunningServer", ["server", "thread"])
def get_socket(port, handler, use_https):
SocketServer.TCPServer.allow_reuse_address = True
if os.name != "nt":
        # We use AF_INET6 to avoid a flaky test issue, particularly with
# the test 019_media_types. It's not well understood why this fixes the
# flaky tests, but it does appear to...
# See https://github.com/denoland/deno/issues/3332
SocketServer.TCPServer.address_family = socket.AF_INET6
if use_https:
return SSLThreadingTCPServer(("", port), handler, CERT_FILE, KEY_FILE)
return SocketServer.TCPServer(("", port), handler)
def server():
os.chdir(root_path) # Hopefully the main thread doesn't also chdir.
Handler = ContentTypeHandler
Handler.extensions_map.update({
".ts": "application/typescript",
".js": "application/javascript",
".tsx": "application/typescript",
".jsx": "application/javascript",
".json": "application/json",
})
s = get_socket(PORT, Handler, False)
if not QUIET:
print "Deno test server http://localhost:%d/" % PORT
return RunningServer(s, start(s))
def base_redirect_server(host_port, target_port, extra_path_segment=""):
os.chdir(root_path)
target_host = "http://localhost:%d" % target_port
class RedirectHandler(QuietSimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(301)
self.send_header('Location',
target_host + extra_path_segment + self.path)
self.end_headers()
s = get_socket(host_port, RedirectHandler, False)
if not QUIET:
print "redirect server http://localhost:%d/ -> http://localhost:%d/" % (
host_port, target_port)
return RunningServer(s, start(s))
# redirect server
def redirect_server():
return base_redirect_server(REDIRECT_PORT, PORT)
# another redirect server pointing to the same port as the one above
# BUT with an extra subdir path
def another_redirect_server():
return base_redirect_server(
ANOTHER_REDIRECT_PORT, PORT, extra_path_segment="/cli/tests/subdir")
# redirect server that points to another redirect server
def double_redirects_server():
return base_redirect_server(DOUBLE_REDIRECTS_PORT, REDIRECT_PORT)
# redirect server that points to itself
def inf_redirects_server():
return base_redirect_server(INF_REDIRECTS_PORT, INF_REDIRECTS_PORT)
# redirect server that redirect to absolute paths under same host
# redirects /REDIRECT/file_name to /file_name
def absolute_redirect_server():
os.chdir(root_path)
class AbsoluteRedirectHandler(ContentTypeHandler):
def do_GET(self):
print(self.path)
if (self.path.startswith("/REDIRECT/")):
self.send_response(302)
self.send_header('Location',
self.path.split('/REDIRECT', 1)[1])
self.end_headers()
else:
ContentTypeHandler.do_GET(self)
s = get_socket(REDIRECT_ABSOLUTE_PORT, AbsoluteRedirectHandler, False)
if not QUIET:
print("absolute redirect server http://localhost:%d/" %
REDIRECT_ABSOLUTE_PORT)
return RunningServer(s, start(s))
def https_server():
os.chdir(root_path) # Hopefully the main thread doesn't also chdir.
Handler = ContentTypeHandler
Handler.extensions_map.update({
".ts": "application/typescript",
".js": "application/javascript",
".tsx": "application/typescript",
".jsx": "application/javascript",
".json": "application/json",
})
s = get_socket(HTTPS_PORT, Handler, True)
if not QUIET:
print "Deno https test server https://localhost:%d/" % HTTPS_PORT
return RunningServer(s, start(s))
def start(s):
thread = Thread(target=s.serve_forever, kwargs={"poll_interval": 0.05})
thread.daemon = True
thread.start()
return thread
@contextmanager
def spawn():
servers = (server(), redirect_server(), another_redirect_server(),
double_redirects_server(), https_server(),
absolute_redirect_server())
# In order to wait for each of the servers to be ready, we try connecting to
# them with a tcp socket.
for running_server in servers:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = running_server.server.server_address[1]
client.connect(("127.0.0.1", port))
print "connected", port
client.close()
assert running_server.thread.is_alive()
    # The following output "ready" is specifically looked for in cli/test_util.rs
# to prevent race conditions.
print "ready"
try:
yield servers
finally:
for s in servers:
            # Make sure all servers are still running;
            # if not, assume there was an error.
assert s.thread.is_alive()
s.server.shutdown()
def main():
with spawn() as servers:
try:
while all(s.thread.is_alive() for s in servers):
sleep(1)
except KeyboardInterrupt:
pass
sys.exit(1)
if __name__ == '__main__':
main()
|
dx_operations_vdb.py
|
#!/usr/bin/env python
# Corey Brune - Oct 2016
#This script starts or stops a VDB
#requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list | --all_dbs <name>)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py --engine landsharkengine --vdb testvdb --stop
dx_operations_vdb.py --vdb testvdb --start
dx_operations_vdb.py --all_dbs enable
dx_operations_vdb.py --all_dbs disable
dx_operations_vdb.py --list
Options:
--vdb <name> Name of the VDB to stop or start
  --start                   Start the VDB
--stop Stop the VDB
--all_dbs <name> Enable or disable all dSources and VDBs
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
  --engine <identifier>     Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.3.002'
import sys
from os.path import basename
from time import sleep, time
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import source
from delphixpy.v1_8_0.web.capacity import consumer
from docopt import docopt
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetReferences import find_all_objects
from lib.GetReferences import find_obj_list
from lib.GetReferences import find_source_by_dbname
from lib.GetSession import GetSession
def vdb_operation(vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
"""
print_debug('Searching for {} reference.\n'.format(vdb_name))
vdb_obj = find_source_by_dbname(dx_session_obj.server_session, database, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'stop':
source.stop(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'enable':
source.enable(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'disable':
source.disable(dx_session_obj.server_session,
vdb_obj.reference)
dx_session_obj.jobs[dx_session_obj.server_session.address] = \
dx_session_obj.server_session.last_job
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print('An error occurred while performing {} on {}:\n'
              '{}\n'.format(operation, vdb_name, e))
def all_databases(operation):
"""
Enable or disable all dSources and VDBs on an engine
operation: enable or disable dSources and VDBs
"""
for db in database.get_all(dx_session_obj.server_session, no_js_container_data_source=True):
print '{} {}\n'.format(operation, db.name)
vdb_operation(db.name, operation)
sleep(2)
def list_databases():
"""
Function to list all databases for a given engine
"""
source_stats_lst = find_all_objects(dx_session_obj.server_session, source)
is_dSource = None
try:
for db_stats in find_all_objects(dx_session_obj.server_session,
consumer):
source_stats = find_obj_list(source_stats_lst, db_stats.name)
if source_stats is not None:
if source_stats.virtual is False:
is_dSource = 'dSource'
elif source_stats.virtual is True:
is_dSource = db_stats.parent
print('name = {}\nprovision container= {}\ndatabase disk '
'usage: {:.2f} GB\nSize of Snapshots: {:.2f} GB\n'
'Enabled: {}\nStatus:{}\n'.format(str(db_stats.name),
str(is_dSource),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024,
source_stats.runtime.enabled,
source_stats.runtime.status))
elif source_stats is None:
print('name = {}\nprovision container= {}\ndatabase disk '
'usage: {:.2f} GB\nSize of Snapshots: {:.2f} GB\n'
'Could not find source information. This could be a '
'result of an unlinked object.\n'.format(
str(db_stats.name), str(db_stats.parent),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024))
except (RequestError, JobError, AttributeError, DlpxException) as e:
        print 'An error occurred while listing databases: {}'.format(e)
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engine simultaneously
engine: Dictionary of engines
"""
jobs = {}
try:
#Setup the connection to the Delphix Engine
dx_session_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while '
                        'setting up the session:\n{}\n'.format(
                            engine['hostname'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
with dx_session_obj.job_mode(single_thread):
while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
if arguments['--start']:
vdb_operation(arguments['--vdb'], 'start')
elif arguments['--stop']:
vdb_operation(arguments['--vdb'], 'stop')
elif arguments['--enable']:
vdb_operation(arguments['--vdb'], 'enable')
elif arguments['--disable']:
vdb_operation(arguments['--vdb'], 'disable')
elif arguments['--list']:
list_databases()
elif arguments['--all_dbs']:
try:
                        assert arguments['--all_dbs'] in ('enable', 'disable'), \
                            '--all_dbs should be either enable or disable'
all_databases(arguments['--all_dbs'])
except AssertionError as e:
print 'ERROR:\n{}\n'.format(e)
sys.exit(1)
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in dx_session_obj.jobs.keys():
job_obj = job.get(dx_session_obj.server_session,
dx_session_obj.jobs[j])
print_debug(job_obj)
print_info('{}: Operations: {}'.format(engine['hostname'],
job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the
# running jobs list.
del dx_session_obj.jobs[j]
                elif job_obj.job_state == 'RUNNING':
#If the job is in a running state, increment the running
# job count.
i += 1
print_info('{}: {:d} jobs running.'.format(
engine['hostname'], i))
#If we have running jobs, pause before repeating the checks.
if len(dx_session_obj.jobs) > 0:
sleep(float(arguments['--poll']))
def run_job():
"""
    This function runs main_workflow asynchronously against all of the
    specified servers
"""
#Create an empty list to store threads we create.
threads = []
engine = None
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print 'Error encountered in run_job():\n{}'.format(e)
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dx_session_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: {}\n'.format(
(arguments['--engine'])))
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException('\nERROR: Delphix Engine {} cannot be '
'found in {}. Please check your value '
'and try again. Exiting.\n'.format(
arguments['--engine'], config_file_path))
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: {}'.format(
dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
break
        if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
#elapsed_minutes = round((time() - time_start)/60, +1)
#return elapsed_minutes
return round((time() - time_start)/60, +1)
def main(arguments):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global dx_session_obj
global debug
if arguments['--debug']:
debug = True
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
#This is the function that will handle processing main_workflow for
# all the servers.
run_job()
#elapsed_minutes = time_elapsed()
print_info('script took {:.2f} minutes to get this far.'.format(
time_elapsed()))
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
        print_exception('Connection failed to the Delphix Engine. '
                        'Please check the ERROR message:\n{}\n'.format(e))
sys.exit(1)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that
we have actionable data
"""
elapsed_minutes = time_elapsed()
print_exception('A job failed in the Delphix Engine')
print_info('{} took {:.2f} minutes to get this far:\n{}\n'.format(
basename(__file__), elapsed_minutes, e))
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
except:
"""
Everything else gets caught here
"""
print_exception(sys.exc_info()[0])
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./single-cold_warm.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes,actionName,params):
for j in range(warmupTimes):
r = os.popen("wsk -i action invoke %s %s --result --blocking" %(actionName,params))
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "helloruby"
actionName = "hello-ruby"
params = ""
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,startTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + \
',' + clientResult[j][2] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
    outfile.close()
    formatResult(latencies, maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
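    # Each relevant line of single-cold_warm.sh output is expected to carry
    # "invokeTime" plus three 13-digit epoch-millisecond values, e.g.
    # (hypothetical): invokeTime: 1600000000000 startTime: 1600000000100 endTime: 1600000000250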
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','','']
i = 0
count = 0
        # Scan the line for up to three 13-digit epoch-millisecond timestamps.
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i+13]
                i += 13
                count += 1
                continue
            i += 1
parsedResults.append(parsedTimes)
return parsedResults
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warup_times: %d\n" % (client, loop, warmup))
resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
if __name__ == "__main__":
    main()
|
__init__.py
|
"""
asyncio - package for asynchronous computing
Notes
=====
Asynchronous computing allows for delayed responses to function or method calls.
Decorator `async_method` adds an argument `callback` for the function which
handles the eventual result.
Example
-------
import datetime
import time
    import urllib.request
from support.asyncio import async_method
@async_method
def query_SIMBAD(url):
return urllib.request.urlopen(url)
def SIMBAD_handler(result):
global response
print("\nhandler got", result, "at", datetime.datetime.now())
response = result
request = "http://simbad.u-strasbg.fr/simbad/sim-basic" +\
"?Ident=3C273&submit=SIMBAD+search"
print(datetime.datetime.now())
query_SIMBAD(request, callback=SIMBAD_handler)
for count in list(range(30)):
time.sleep(0.1)
print(".", end="")
print(response.readlines()[200:220], "at", datetime.datetime.now())
Doing this with a remote process depends on the way interprocess communications
are done. This package has two modules, for `pyro` and `flaskio`.
"""
import functools
import threading
def async_method(func):
@functools.wraps(func) # copy all the private attributes of 'func' to 'wrapper'
def wrapper(*args, **kwargs):
print("async_method:", func.__name__, "args:", args)
print("async_method:", func.__name__, "keyword args:",kwargs)
if 'callback' in kwargs:
callback = kwargs['callback']
kwargs.pop('callback')
# original unwrapped function
def orig_func(*args, **kwargs):
res = func(*args, **kwargs)
callback(res) # invoke callback when done
            # have a thread execute the original function; return the Thread
            # so the caller can join() it if needed
            mythread = threading.Thread(target=orig_func, args=args, kwargs=kwargs)
            mythread.start()
            return mythread
else:
return func(*args, **kwargs)
return wrapper
|
producer.py
|
from kafka import KafkaProducer
import time, threading, random
import os
import configparser
from org.sfu.billing.simulator.module.datagenerator import generate
from org.sfu.billing.simulator.utility.data_loader import load_customer
def load_properties():
config_dir = os.environ.get('APP_HOME')
    config_fileName = 'config.ini'
config_file = config_dir + os.sep + config_fileName
config = configparser.ConfigParser()
config.read(config_file)
return config
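# $APP_HOME/config.ini is expected to provide the [KAFKA] keys used below
# (hypothetical example values):
#   [KAFKA]
#   SERVER_IP = 127.0.0.1
#   SERVER_PORT = 9092
#   TOPIC_PREFIX = cdr-topic-
#   RATE = 1 10 100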
# messages per second
# creates a Kafka producer with the server address read from config.ini
# creates topic
# sets frequency of data ingestion
# gets the message from simulator
# sends to kafka
def test_streaming():
data = load_customer()
count = len(data)
while True:
index = random.randint(0, count - 1)
msg = generate(data[index])
print(msg)
def send_at(rate):
config = load_properties()
producer = KafkaProducer(bootstrap_servers=[config['KAFKA']['SERVER_IP']+':'+config['KAFKA']['SERVER_PORT']])
topic = config['KAFKA']['TOPIC_PREFIX'] + str(rate)
interval = 1 / rate
data = load_customer()
count = len(data)
while True:
index = random.randint(0, count-1)
msg = generate(data[index])
producer.send(topic, msg.encode('ascii'))
time.sleep(interval)
# uses a thread to produce data for each rate mentioned in config.ini
if __name__ == "__main__":
#test_streaming()
config = load_properties()
rates = map(int, config['KAFKA']['RATE'].split(" "))
#rates = [1,10,100]
for rate in rates:
server_thread = threading.Thread(target=send_at, args=(rate,))
        server_thread.daemon = True
server_thread.start()
while 1:
time.sleep(1)
|
cluster_model_final_v1.py
|
# coding: utf-8
import json
import Queue
import Levenshtein
import numpy as np
from scipy import spatial
import file_config
from xgboost_rank import *
from collections import Counter
from similarity import similarity, sent_distance, sent_ratio, sentence_sim, keyword_sim, vec_dict, vocab_set, is_mutual_sub
from utils import *
import multiprocessing
import os
from pprint import pprint
class Author(object):
def __init__(self, json_author):
# self.name = json_author['name'].lower()
# self.org = json_author.get('org', '').lower()
self.name = ' '.join([item.strip() for item in json_author['name'].lower().replace('-', ' ').split('.') if item != ''])
self.org = org_process(json_author.get('org', ''))
def original_info(self):
return {'name': self.name, 'org': self.org}
class Paper(object):
def __init__(self, index, name, json_paper):
self.index = index
self.cluster_index = index
self.name = name
self.id = json_paper.get('id', '')
self.title = json_paper.get('title', '').lower()
self.authors = [Author(json_author) for json_author in json_paper.get('authors', '')]
self.venue = json_paper.get('venue', '').lower()
self.year = json_paper.get('year', 0)
self.keywords = [word.lower() for word in json_paper.get('keywords', [])]
self.abstract = json_paper.get('abstract', '').lower()
self.names = [author.name for author in self.authors]
self.orgs = [author.org for author in self.authors]
self.author_dic = dict(zip(self.names, self.orgs))
self.org = self.author_dic.get(self.name, '')
        # position of the current author within the paper's author list
self.author_pos = 0
for pos, author_name in enumerate(self.names):
if author_name == name:
self.author_pos = pos
def original_info(self):
        # return self.__dict__  # self.authors cannot be serialized directly
res = dict()
res['id'] = self.id
res['title'] = self.title
res['authors'] = [author.original_info() for author in self.authors]
res['venue'] = self.venue
res['year'] = self.year
res['keywords'] = self.keywords
res['abstract'] = self.abstract
return res
class Cluster(object):
def __init__(self, index, name, json_cluster=None):
self.index = index
self.name = name
# self.papers = [Paper(self.index, self.name, json_paper)]
self.papers = []
for json_paper in json_cluster:
self.papers.append(Paper(self.index, self.name, json_paper))
self.update_main()
# self.main_paper = self.papers[0]
# self.main_names = self.main_paper.names
# self.main_org = self.main_paper.org
def update_main(self): # update after merge
max_len = 0
max_len_id = 0
for i, paper in enumerate(self.papers):
if max_len < len(paper.org.split()):
max_len_id = i
max_len = len(paper.org.split())
self.main_paper = self.papers[max_len_id]
self.main_names = self.main_paper.names
self.main_org = self.main_paper.org
self.main_venue = self.main_paper.venue
self.main_title = self.main_paper.title
self.main_keywords = self.main_paper.keywords
self.index = self.main_paper.index
for paper in self.papers:
paper.cluster_index = self.index
def is_str_in_orgs(self, string):
orgs = [org for paper in self.papers for org in paper.orgs]
for org in orgs:
if string in org:
return True
return False
def output(self):
return [paper.id for paper in self.papers]
def original_info(self):
return [paper.original_info() for paper in self.papers]
class Person(object):
def __init__(self, name, json_person):
self.name = name.lower()
self.clusters = []
for index, json_paper in enumerate(json_person):
self.clusters.append(Cluster(index, self.name, [json_paper]))
self.cluster_dict = {}
for cluster in self.clusters:
self.cluster_dict[cluster.index] = cluster
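    # Every paper starts out as its own singleton Cluster; the merge passes
    # below (co_author_run, co_author_second_run, org_run, combine_cluster,
    # combine_small, combine_other) progressively fuse clusters that appear
    # to belong to the same real-world author.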
def merge_cluster(self, cluster1_id, cluster2_id):
cluster1 = self.cluster_dict[cluster1_id]
cluster2 = self.cluster_dict[cluster2_id]
cluster1.papers.extend(cluster2.papers)
cluster1.update_main()
self.cluster_dict[cluster1.index] = cluster1
self.clusters.remove(cluster2)
def remove_paper_from_cluster(self, cluster_item, paper):
cluster_item.papers.remove(paper)
cluster_item.update_main()
self.cluster_dict[cluster_item.index] = cluster_item
paper.cluster_index = paper.index
new_cluster = Cluster(paper.index, self.name, [paper.original_info()])
self.clusters.append(new_cluster)
self.cluster_dict[new_cluster.index] = new_cluster
def co_author_run(self):
q = Queue.Queue(len(self.clusters))
for cluster in self.clusters:
q.put(cluster)
while not q.empty():
main_cluster = q.get()
not_merge_clusters = []
while not q.empty():
cluster = q.get()
                # the name contains an abbreviation (single-letter first name)
if len(main_cluster.main_paper.name.split(" ")[0]) == 1:
                    # special-case rule
if main_cluster.is_str_in_orgs('physical') and cluster.is_str_in_orgs('hospital') \
or main_cluster.is_str_in_orgs('hospital') and cluster.is_str_in_orgs('physical'):
not_merge_clusters.append(cluster)
continue
if len(main_cluster.main_names) >= 20 and len(cluster.main_names) >= 20:
                        # merging of large author lists
if (len(main_cluster.main_names) >= 200 or len(cluster.main_names) >= 200):
if (len(set(main_cluster.main_names) & set(cluster.main_names)) * 1.0 / len(
set(main_cluster.main_names) | set(cluster.main_names)) > 0.4 \
or len(set(main_cluster.main_names) & set(cluster.main_names)) >= 100):
                                # merge large clusters whose overlap exceeds the threshold
self.merge_cluster(main_cluster.index, cluster.index)
continue
elif (len(set(main_cluster.main_names) & set(cluster.main_names)) > max(4, int(
0.2 * min(len(main_cluster.main_names), len(cluster.main_names))))):
                                # merge by the formula to recall medium-sized clusters
self.merge_cluster(main_cluster.index, cluster.index)
continue
else:
if (len(set(main_cluster.main_names) & set(cluster.main_names)) > \
max(4, int(0.2 * min(len(main_cluster.main_names), len(cluster.main_names)))) \
and sent_distance(main_cluster.main_org, cluster.main_org) < 3):
                                # two same-name authors with overlapping organizations
self.merge_cluster(main_cluster.index, cluster.index)
continue
if (len(set(main_cluster.main_names) & set(cluster.main_names)) > 2 \
and sent_distance(main_cluster.main_org, cluster.main_org) < 3 \
and is_org_contain_keyword(main_cluster.main_org)):
                                # two same-name authors with overlapping organizations
self.merge_cluster(main_cluster.index, cluster.index)
continue
if (len(set(main_cluster.main_names) & set(cluster.main_names)) > \
max(2, int(0.1 * min(len(main_cluster.main_names), len(cluster.main_names)))) \
and main_cluster.main_venue == cluster.main_venue and main_cluster.main_venue != ""):
                                # more than two shared co-authors and the same venue
self.merge_cluster(main_cluster.index, cluster.index)
continue
if (len(set(main_cluster.main_names) & set(cluster.main_names)) > \
max(2, int(0.1 * min(len(main_cluster.main_names), len(cluster.main_names)))) \
and sentence_sim(main_cluster.main_title, cluster.main_title) > 0.7 \
and keyword_sim(main_cluster.main_keywords, cluster.main_keywords) > 0.7):
                                # more than two shared co-authors and title or keyword similarity above 0.7
self.merge_cluster(main_cluster.index, cluster.index)
continue
elif (len(main_cluster.main_names) < 20 and len(cluster.main_names) < 20):
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and (sent_distance(main_cluster.main_org, cluster.main_org) < 3):
                            # two same-name authors with overlapping organizations
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > \
max(2, 0.3 * min(len(main_cluster.main_names), len(cluster.main_names))):
if not (main_cluster.main_org == 'national key laboratory transient optic photonic' \
and cluster.main_org == 'pathogen biologicalbiologicalbiologicalbiological' \
or main_cluster.main_org == 'pathogen biologicalbiologicalbiologicalbiological'
and cluster.main_org == 'national key laboratory transient optic photonic'):
self.merge_cluster(main_cluster.index, cluster.index)
                                continue  # more than two shared co-authors among author lists of fewer than 20 people
                            # more than two shared co-authors among author lists of fewer than 20 people
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and main_cluster.main_venue == cluster.main_venue and main_cluster.main_venue != "":
if not (main_cluster.main_org == 'neurologicalneurologicalneurologicalneurological fuzhou dongfang hospital fujian medical university' \
and cluster.main_org == 'northwest a f university' \
or main_cluster.main_org == 'northwest a f university'
and cluster.main_org == 'neurologicalneurologicalneurologicalneurological fuzhou dongfang hospital fujian medical university'):
                                # more than two shared co-authors and the same venue
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and sentence_sim(main_cluster.main_title, cluster.main_title) > 0.7 \
and (keyword_sim(main_cluster.main_keywords, cluster.main_keywords) > 0.7):
                            # more than two shared co-authors and title or keyword similarity above 0.7
self.merge_cluster(main_cluster.index, cluster.index)
continue
else:
if (len(main_cluster.main_names) + len(cluster.main_names) < 100 and len(
set(main_cluster.main_names) & set(cluster.main_names)) > 3):
self.merge_cluster(main_cluster.index, cluster.index)
continue
not_merge_clusters.append(cluster)
else:
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and (sent_distance(main_cluster.main_org, cluster.main_org) < 3 \
or is_mutual_sub(main_cluster.main_org, cluster.main_org)):
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 2 \
and len(main_cluster.main_names) < 20 and len(cluster.main_names) < 20:
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 3:
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and main_cluster.main_venue == cluster.main_venue and main_cluster.main_venue != "":
self.merge_cluster(main_cluster.index, cluster.index)
continue
if len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
and sentence_sim(main_cluster.main_title, cluster.main_title) > 0.7 \
and keyword_sim(main_cluster.main_keywords, cluster.main_keywords) > 0.7:
self.merge_cluster(main_cluster.index, cluster.index)
continue
not_merge_clusters.append(cluster)
# if (len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
# and (sent_distance(main_cluster.main_org, cluster.main_org) < 4)) \
# or (len(set(main_cluster.main_names) & set(cluster.main_names)) > 2 \
# and len(main_cluster.main_names) < 20 and len(cluster.main_names) < 20) \
# or (len(set(main_cluster.main_names) & set(cluster.main_names)) > 1 \
# and main_cluster.main_venue == cluster.main_venue and main_cluster.main_venue != "") \
# or (len(set(main_cluster.main_names) & set(cluster.main_names)) > 1
# and sentence_sim(main_cluster.main_title, cluster.main_title) > 0.7
# and ((keyword_sim(main_cluster.main_keywords, cluster.main_keywords) > 0.7
# and len(main_cluster.main_keywords) != 0 and len(cluster.main_keywords) != 0)
# or len(main_cluster.main_keywords) == 0 or len(cluster.main_keywords) == 0)):
#
# self.merge_cluster(main_cluster.index, cluster.index)
# else:
# not_merge_clusters.append(cluster)
for cluster in not_merge_clusters:
q.put(cluster)
def co_author_second_run(self):
q = Queue.Queue(len(self.clusters))
for cluster in self.clusters:
q.put(cluster)
while not q.empty():
main_cluster = q.get()
not_merge_clusters = []
while not q.empty():
cluster = q.get()
                # the name contains an abbreviation (single-letter first name)
if len(main_cluster.main_paper.name.split(" ")[0]) == 1:
pass
else:
if self.is_author_same(main_cluster, cluster):
self.merge_cluster(main_cluster.index, cluster.index)
else:
not_merge_clusters.append(cluster)
for cluster in not_merge_clusters:
q.put(cluster)
def org_run(self):
no_information_num = 0
q = Queue.Queue(len(self.clusters))
for cluster in self.clusters:
q.put(cluster)
while not q.empty():
main_cluster = q.get()
while main_cluster.main_org == '' and not q.empty():
main_cluster = q.get()
not_merge_clusters = []
while not q.empty():
cluster = q.get()
if cluster.main_org == '':
# print("Waring: no org information!!")
no_information_num += 1
continue
                # the name contains an abbreviation (single-letter first name)
if len(main_cluster.main_paper.name.split(" ")[0]) == 1:
if sent_distance(main_cluster.main_org, cluster.main_org) < 3:
self.merge_cluster(main_cluster.index, cluster.index)
else:
not_merge_clusters.append(cluster)
else:
if self.is_org_same(main_cluster.main_org, cluster.main_org) \
or self.is_org_author_same(main_cluster, cluster):
self.merge_cluster(main_cluster.index, cluster.index)
else:
not_merge_clusters.append(cluster)
for cluster in not_merge_clusters:
q.put(cluster)
print("Number of no org information is:", no_information_num)
def is_author_same(self, current_cluster, other_cluster):
"""
        Determine whether the two clusters contain papers sharing more than two co-authors
:param current_cluster:
:param other_cluster:
:return:
"""
is_merge = False
for current_paper in current_cluster.papers:
for other_paper in other_cluster.papers:
if len(set(current_paper.names) & set(other_paper.names)) > 2 \
and len(current_paper.names) < 20 and len(other_paper.names) < 20:
is_merge = True
break
if len(set(current_paper.names) & set(other_paper.names)) == 3 \
and (sent_distance(current_paper.org, other_paper.org) < 3 \
or is_mutual_sub(current_paper.org, other_paper.org) \
or current_paper.venue == other_paper.venue):
is_merge = True
break
if len(set(current_paper.names) & set(other_paper.names)) > 3:
is_merge = True
break
if is_merge:
break
return is_merge
def is_org_author_same(self, current_cluster, other_cluster):
"""
        Determine whether the two clusters contain papers with the same org and shared co-authors
:param current_cluster:
:param other_cluster:
:return:
"""
is_merge = False
for current_paper in current_cluster.papers:
for other_paper in other_cluster.papers:
if sent_distance(current_paper.org, other_paper.org) < 3 \
and len(set(current_paper.names) & set(other_paper.names)) > 1 \
and max(len(current_cluster.papers), len(other_cluster.papers)) < 100 \
and len(current_cluster.papers) * len(other_cluster.papers) < 200:
is_merge = True
break
if sent_distance(current_paper.org, other_paper.org) < 3 \
and len(set(current_paper.names) & set(other_paper.names)) > 1 \
and len(current_paper.org.split()) > 4:
is_merge = True
break
if is_merge:
break
return is_merge
def is_org_same(self, current_org, other_org):
"""
        Determine whether two org strings are (approximately) the same
:param current_cluster:
:param other_cluster:
:return:
"""
is_org = False
if sent_distance(current_org, other_org) < 3 \
and ((len(current_org.split()) > 3 \
and len(other_org.split()) > 3)
or is_org_contain_keyword(current_org)):
is_org = True
return is_org
def combine_cluster(self):
"""
        Merge the existing clusters
:param xgboost_model:
:return:
"""
q = Queue.Queue(len(self.clusters))
        # sort the current clusters by size in descending order
cluster_dict = {}
for index, cluster in enumerate(self.clusters):
cluster_dict[index] = [len(cluster.papers), cluster]
sort_cluster_list = sorted(cluster_dict.items(), key=lambda x: x[1][0], reverse=True)
sort_cluster_list = [cluster_pair[1][1] for cluster_pair in sort_cluster_list]
for cluster_item in sort_cluster_list:
q.put(cluster_item)
while not q.empty():
current_cluster = q.get()
            # skip single-paper clusters as the merge base
while len(current_cluster.papers) == 1 and not q.empty():
current_cluster = q.get()
not_merge_clusters = []
while not q.empty():
other_cluster = q.get()
                # do not consider merging with single-paper clusters
if len(other_cluster.papers) == 1:
continue
short_org_same_num, long_org_same_num, same_org_num, same_org_ratio, \
co_author_num, same_venue_num = self.cluster_sim(current_cluster, other_cluster)
                # the name contains an abbreviation (single-letter first name)
if len(current_cluster.main_paper.name.split(" ")[0]) == 1:
co_author_num = self.abbr_cluster_co_author_num(current_cluster, other_cluster)
if long_org_same_num > 3:
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if short_org_same_num > 3 and co_author_num > 3 \
and (len(current_cluster.papers) < 20 or len(other_cluster.papers) < 20) \
and (float(short_org_same_num) / len(current_cluster.papers) > 0.15 \
or float(co_author_num) / len(current_cluster.papers) > 0.15 \
or short_org_same_num > len(current_cluster.papers)):
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if short_org_same_num > 4 and co_author_num > 10 \
and (len(current_cluster.papers) > 20 and len(other_cluster.papers) > 20):
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if same_org_ratio > 0.4 and len(other_cluster.papers) < 9 \
and len(current_cluster.papers) * len(other_cluster.papers) < 300:
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
else:
not_merge_clusters.append(other_cluster)
else:
if long_org_same_num > 3:
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if short_org_same_num > 3 and co_author_num > 2 \
and (len(current_cluster.papers) < 20 or len(other_cluster.papers) < 20) \
and (float(short_org_same_num)/len(current_cluster.papers) > 0.15 \
or float(co_author_num)/len(current_cluster.papers) > 0.15 \
or short_org_same_num > len(current_cluster.papers)):
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if short_org_same_num > 3 and co_author_num > 10 \
and (len(current_cluster.papers) > 20 and len(other_cluster.papers) > 20):
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
if same_org_ratio > 0.4 and len(other_cluster.papers) < 9 \
and len(current_cluster.papers) * len(other_cluster.papers) < 300:
# print("short_org_same_num:{0}".format(short_org_same_num))
# print("long_org_same_num:{0}".format(long_org_same_num))
# print("co_author_num:{0}".format(co_author_num))
# print(len(current_cluster.papers))
# print(len(other_cluster.papers))
# print(current_cluster.main_org)
# print(other_cluster.main_org)
# print("\n" * 3)
self.merge_cluster(current_cluster.index, other_cluster.index)
continue
else:
not_merge_clusters.append(other_cluster)
# if same_org_num > 3 or same_org_ratio > 0.4 \
# or (co_author_num > 3 and same_venue_num > 1 \
# and max(len(current_cluster.papers), len(other_cluster.papers)) < 150):
# # or (co_author_num > 2 and same_org_num > 2 \
# # and max(len(current_cluster.papers), len(other_cluster.papers)) < 100) \
# # or (co_author_num > 2 and same_venue_num > 1 and same_org_num > 1 \
# # and max(len(current_cluster.papers), len(other_cluster.papers)) < 100) \
# # or (same_venue_num > 1 and same_org_num > 2 \
# # and len(current_cluster.papers) < 40 and len(other_cluster.papers) < 40) \
# # or (len(current_cluster.papers) < 7 and len(other_cluster.papers) < 7 \
# # and ((co_author_num > 1 and same_venue_num > 0) \
# # or (co_author_num > 1 and same_org_num > 0) \
# # or (same_org_num > 1 and same_venue_num > 0))):
# # print "cluster index:{0}, {1}".format(current_cluster.index, other_cluster.index)
# self.merge_cluster(current_cluster.index, other_cluster.index)
# else:
# not_merge_clusters.append(other_cluster)
for cluster in not_merge_clusters:
q.put(cluster)
def cluster_sim(self, current_cluster, other_cluster):
"""
        Compute the org overlap between two clusters
:param current_cluster:
:param other_cluster:
:return:
"""
current_org_list = []
current_venue_set = set()
for paper in current_cluster.papers:
if paper.org != "":
current_org_list.append(paper.org)
if paper.venue != "":
current_venue_set.add(paper.venue)
other_org_list = []
other_venue_set = set()
for paper in other_cluster.papers:
if paper.org != "":
other_org_list.append(paper.org)
if paper.venue != "":
other_venue_set.add(paper.venue)
same_org_ratio = 0.0
short_org_same_num = 0
long_org_same_num = 0
same_org_num = 0
co_author_num = 0
same_venue_num = 0
        # iterate over the cluster with more papers
if len(current_cluster.papers) >= len(other_cluster.papers):
other_org_set = set(other_org_list)
for current_org in current_org_list:
if (len(current_org.split()) < 4 and not is_org_contain_keyword(current_org)) and current_org in other_org_set:
short_org_same_num += 1
continue
if (len(current_org.split()) > 3 or is_org_contain_keyword(current_org)) and current_org in other_org_set:
if is_org_special(current_org) and current_cluster.name == "meng wang":
short_org_same_num += 1
else:
long_org_same_num += 1
for current_paper in current_cluster.papers:
if current_paper.venue in other_venue_set:
same_venue_num += 1
for other_paper in other_cluster.papers:
if len(set(current_paper.names) & set(other_paper.names)) > 1 \
and abs(len(current_paper.names) - len(other_paper.names)) < 8:
co_author_num += 1
else:
current_org_set = set(current_org_list)
for other_org in other_org_list:
if (len(other_org.split()) < 4 and not is_org_contain_keyword(other_org)) and other_org in current_org_set:
short_org_same_num += 1
continue
if (len(other_org.split()) > 3 or is_org_contain_keyword(other_org)) and other_org in current_org_set:
if is_org_special(other_org) and current_cluster.name == "meng wang":
short_org_same_num += 1
else:
long_org_same_num += 1
for other_paper in other_cluster.papers:
if other_paper.venue in current_venue_set:
same_venue_num += 1
for current_paper in current_cluster.papers:
if len(set(current_paper.names) & set(other_paper.names)) > 1 \
and abs(len(current_paper.names) - len(other_paper.names)) < 8:
co_author_num += 1
same_org_num = short_org_same_num + long_org_same_num
if len(current_org_list) + len(other_org_list) != 0:
same_org_ratio = float(same_org_num) / \
(len(current_org_list) + len(other_org_list))
return [short_org_same_num, long_org_same_num, same_org_num, same_org_ratio, co_author_num, same_venue_num]
def abbr_cluster_co_author_num(self, current_cluster, other_cluster):
"""
        Count shared co-authors between clusters whose author name is abbreviated
:return:
"""
short_co_author_num = 0
long_co_author_num = 0
        # iterate over the cluster with more papers
if len(current_cluster.papers) >= len(other_cluster.papers):
for current_paper in current_cluster.papers:
for other_paper in other_cluster.papers:
if len(set(current_paper.names) & set(other_paper.names)) > 1 \
and abs(len(current_paper.names) - len(other_paper.names)) < 8 \
and max(len(current_paper.names), len(other_paper.names)) < 20:
short_co_author_num += 1
if len(set(current_paper.names) & set(other_paper.names)) > \
max(4, int(0.2 * min(len(current_paper.names), len(other_paper.names))))\
and abs(len(current_paper.names) - len(other_paper.names)) < 8 \
and min(len(current_paper.names), len(other_paper.names)) > 20:
long_co_author_num += 1
else:
for other_paper in other_cluster.papers:
for current_paper in current_cluster.papers:
if len(set(current_paper.names) & set(other_paper.names)) > 1 \
and abs(len(current_paper.names) - len(other_paper.names)) < 8 \
and max(len(current_paper.names), len(other_paper.names)) < 16:
short_co_author_num += 1
if len(set(current_paper.names) & set(other_paper.names)) > \
max(5, int(0.2 * min(len(current_paper.names), len(other_paper.names))))\
and abs(len(current_paper.names) - len(other_paper.names)) < 8 \
and min(len(current_paper.names), len(other_paper.names)) > 20:
long_co_author_num += 1
return short_co_author_num + long_co_author_num
def combine_small(self):
"""
        Merge small clusters
:return:
"""
single_paper_clusters = [cluster for cluster in self.clusters if len(cluster.papers) == 1]
not_single_paper_clusters = [cluster for cluster in self.clusters if len(cluster.papers) > 1]
not_single_paper_clusters = sorted(not_single_paper_clusters, key=lambda x: len(x.papers))
small_paper_clusters = [cluster for cluster in self.clusters if len(cluster.papers) < 5]
for single_cluster in single_paper_clusters:
is_merged = False
main_paper = single_cluster.papers[0]
for not_single_cluster in not_single_paper_clusters:
                # compare the single-paper cluster's org with the larger cluster's main_org
if sent_distance(main_paper.org, not_single_cluster.main_org) < 3 \
and len(main_paper.org.split()) > 2:
self.merge_cluster(not_single_cluster.index, single_cluster.index)
break
if len(single_cluster.name.split()[0]) == 1:
continue
for paper in not_single_cluster.papers:
                    # merge the singleton cluster into the larger one based on shared co-authors
if len(set(main_paper.names) & set(paper.names)) > 1 \
and ((sentence_sim(main_paper.title, paper.title) > 0.7 \
and keyword_sim(main_paper.keywords, paper.keywords) > 0.7) \
or (main_paper.venue == paper.venue and main_paper.venue != "")):
self.merge_cluster(not_single_cluster.index, single_cluster.index)
is_merged = True
break
if is_merged:
break
def combine_other(self, xgboost_model):
"""
        Attach the remaining singleton clusters to the larger clusters
:param xgboost_model:
:return:
"""
tmp_clusters = [cluster for cluster in self.clusters]
        # collect the main paper of every cluster
main_papers = [cluster.main_paper for cluster in tmp_clusters]
count = 0
for cluster in tmp_clusters:
if len(self.cluster_dict[cluster.index].papers) == 1:
# count += 1
# if count % 50 == 0:
# print count
current_paper = cluster.papers[0]
other_paper_list = []
feas_list = []
for paper in main_papers:
if paper.id != current_paper.id:
other_paper_list.append(paper)
                        # compute the similarity between the two papers
feas = similarity(current_paper, paper)
feas_list.append(feas)
dtest = xgb.DMatrix(feas_list)
dtest.set_group([len(feas_list)])
preds = xgboost_model.predict(dtest).tolist()
                # collect the clusters corresponding to the top-3 ranked papers
pred_dict = {}
for i, val in enumerate(preds):
pred_dict[i] = val
sort_pred_list = sorted(pred_dict.items(), key=lambda x: x[1])
# pred_index_list = [ele[0] for ele in sort_pred_list[:3]]
# pre_fea_list = [feas_list[index] for index in pred_index_list]
# pred_cluster_indexs = [other_paper_list[index].cluster_index for index in pred_index_list]
                # filter before merging
if (current_paper.org != "" and feas_list[sort_pred_list[0][0]][0] < 0.5 and sort_pred_list[0][1] > -2.8) \
or (current_paper.org == "" and sort_pred_list[0][1] > -2.3):
# print "nocombine:{0}, fea:{1}".format(sort_pred_list[0][1], feas_list[sort_pred_list[0][0]])
continue
# print "combine:{0}, fea:{1}".format(sort_pred_list[0][1], feas_list[sort_pred_list[0][0]])
# print "cluster index:{0}, {1}".format(other_paper_list[sort_pred_list[0][0]].cluster_index, cluster.index)
self.merge_cluster(other_paper_list[sort_pred_list[0][0]].cluster_index, cluster.index)
                # choose the cluster the current paper should join
# if len(set(pred_cluster_indexs)) < len(pred_cluster_indexs):
# self.merge_cluster(cluster_dict[Counter(pred_cluster_indexs).most_common(1)[0][0]], cluster)
# else:
# self.merge_cluster(cluster_dict[pred_cluster_indexs[0]], cluster)
def save(self, path):
if not os.path.exists(path):
os.makedirs(path)
idx = 0
for cluster in self.clusters:
if len(cluster.papers) > 1:
res = cluster.original_info()
# json.dump(res, open('tmp_cluster/' + str(len(cluster.papers)) + '_' + str(idx) + '.json', 'w'), indent=4)
json.dump(res, open(path + str(len(cluster.papers)) + '_' + str(idx) + '.json', 'w'), indent=4)
idx += 1
res = [cluster.original_info() for cluster in self.clusters if len(cluster.papers) == 1]
json.dump(res, open(path + '1.json', 'w'), indent=4)
def worker(i, name):
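    # NOTE: relies on module-level `valid_data` and `rank_model`, which are
    # only defined in the commented-out "for Valid data" block at the bottom
    # of this file.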
print(i)
print(name)
person = Person(' '.join(name.split('_')), valid_data[name])
print(len(person.clusters))
person.co_author_run()
person.co_author_second_run()
person.org_run()
person.combine_cluster()
person.combine_cluster()
person.combine_small()
person.combine_other(rank_model)
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
res = {}
res[name] = [cluster.output() for cluster in person.clusters]
json.dump(res, open('result_dir/result' + str(i) + '.json', 'w'), indent=4)
if __name__ == '__main__':
# data = json.load(open('/Users/coder352/datasets/Entity/Name_Disambiguation/Scholar2018/pubs_train.json'))
# test_data = json.load(open(file_config.test_data_path))
# xgboost_rank = XgboostRank(file_config.xgboost_model_path)
# rank_model_model = xgboost_rank.load_rank_model()
# person = Person('qin zhang', test_data['qin_zhang'])
# print(len(person.clusters))
#
# print(person.name)
# print(person.clusters[0].papers[0].title)
# print(person.clusters[0].main_paper.id)
#
# person.co_author_run()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/co_author_run/')
#
# person.co_author_second_run()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/co_author_second_run/')
#
# person.org_run()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/org_run/')
#
# person.combine_cluster()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/combine_cluster1/')
#
# person.combine_cluster()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/combine_cluster2/')
#
# person.combine_small()
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
# person.save('tmp_cluster/' + person.name + '/combine_small/')
# person.combine_other(rank_model_model)
# print(len(person.clusters))
# print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
################################################################
# write one cluster into one file
# import os
# if not os.path.exists("tmp_cluster/"):
# os.makedirs("tmp_cluster/")
# idx = 0
# for cluster in person.clusters:
# res = cluster.original_info()
# # json.dump(res, open('tmp_cluster/' + str(len(cluster.papers)) + '_' + str(cluster.index) + "_" + str(idx) + '.json', 'w'), indent=4)
#
# if len(cluster.papers) > 1:
# json.dump(res, open('tmp_cluster/' + str(len(cluster.papers)) + '_' + str(idx) + '.json', 'w'), indent=4)
# idx += 1
#
# res = [cluster.original_info() for cluster in person.clusters if len(cluster.papers) == 1]
# json.dump(res, open('tmp_cluster/1.json', 'w'), indent=4)
##################################################################
# For test data
xgboost_rank = XgboostRank(file_config.xgboost_model_path)
rank_model = xgboost_rank.load_rank_model()
test_data = json.load(open(file_config.test_data_path))
res = {}
for i, name in enumerate(test_data.keys()):
person = Person(' '.join(name.split('_')), test_data[name])
print(i)
print(person.name)
print(len(person.clusters))
print(person.clusters[0].main_paper.id)
person.co_author_run()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.co_author_second_run()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.org_run()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.combine_cluster()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.combine_cluster()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.combine_small()
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
person.combine_other(rank_model)
print(len(person.clusters))
print(sorted(Counter([len(cluster.papers) for cluster in person.clusters]).items(), key=lambda x: x[0]))
res[name] = [cluster.output() for cluster in person.clusters]
json.dump(res, open('result.json', 'w'), indent=4)
##################################################################
# for Valid data
# if not os.path.exists("result_dir/"):
# os.makedirs("result_dir/")
# valid_data = json.load(open(file_config.validate_data_path))
# xgboost_rank = XgboostRank(file_config.xgboost_model_path)
# rank_model = xgboost_rank.load_rank_model()
#
# for i, name in enumerate(valid_data.keys()):
# if len(name.split("_")[0]) == 1:
# continue
#
# p = multiprocessing.Process(target=worker, args=(i, name))
# p.start()
# res = {}
# dir_path = "result_dir"
# file_list = os.listdir(dir_path)  # list every file and directory under the folder
#
# for i in range(len(file_list)):
# file_path = os.path.join(dir_path, file_list[i])
# name_json = json.load(open(file_path))
# res[list(name_json)[0]] = name_json[list(name_json)[0]]
#
# json.dump(res, open('result.json', 'w'), indent=4)
|
actionproxy.py
|
#
# ActionProxy base class
#
import time
from threading import Thread
import rospy
from std_msgs.msg import String
'''
pnp_ros publishes a String topic named `pnp/action_str` in the form
[<robotname>#]<actionname>[_<params>].<command>
Action executors should listen to this message and execute the corresponding action. The programmer is responsible for ensuring that only one executor takes care of executing a given action.
Test with CLI
rostopic pub pnp/action_str std_msgs/String "data:'wait_10.start'"
Quit all action proxies with
rostopic pub pnp/action_str std_msgs/String "data: '%quit_server'" --once
'''
# topic for subscribers
TOPIC_PNPACTIONPROXY_STR = "pnp/action_str"
# topic for publishers
TOPIC_PNPACTIONCMD = "pnp/actionCmd"
class ActionProxy:
def __init__(self, actionname):
self.do_run = False
self.athread = None
self.actionname = actionname
# init ROS node
nodename = actionname+"_actionproxy"
rospy.init_node(nodename, disable_signals=True)
# subscribers
self.actionproxy_sub = rospy.Subscriber(TOPIC_PNPACTIONPROXY_STR, String, self.actionproxy_cb)
# publishers
self.actioncmd_pub = rospy.Publisher(TOPIC_PNPACTIONCMD, String, queue_size=1)
def __del__(self):
# just in case
self.end()
def actionproxy_cb(self, data):
sdata = data.data
if ('%quit_server' in sdata):
self.quit_server()
return
robot = None
action = None
params = None
command = None
v = sdata.split('#')
if len(v)>1:
robot = v[0]
sdata = v[1]
v = sdata.split('.')
if len(v)!=2:
raise Exception("ActionProxy: wrong format in %s [%s]" %(TOPIC_PNPACTIONPROXY_STR, sdata))
command = v[1]
k = v[0].find('_')
if (k<0):
action = v[0]
else:
action = v[0][0:k]
params = v[0][k+1:]
if action==self.actionname:
print("robot: %s action: %s params: %s command: %s" \
%(robot, action, params, command))
if command=='start':
self.start(params)
elif command=='interrupt':
self.interrupt()
elif command=='end':
self.end()
else:
print("ActionProxy: wrong command %s" %(command))
# start the action monitor thread / non-blocking
def start(self, params=None):
if self.athread != None:
self.end()
self.do_run = True
self.athread = Thread(target=self.action_thread, args=(params,))
self.athread.start()
def interrupt(self):
self.end()
def end(self):
self.do_run = False
if self.athread != None:
self.athread.join()
self.athread = None
def isRunning(self):
self.do_run = self.athread != None and self.athread.is_alive()
return self.do_run
# exec the action / blocking, CTRL-C to interrupt
def execute(self, params):
self.start(params)
while (self.isRunning()):
try:
rospy.sleep(1)
except KeyboardInterrupt:
self.interrupt()
self.end()
def run_server(self): # keep the server running -> actions managed by actionproxy_cb
print("ActionProxy %s running ..." %(self.actionname))
rate = rospy.Rate(1)
self.server_run = True
while not rospy.is_shutdown() and self.server_run:
try:
rate.sleep()
except KeyboardInterrupt:
print("ActionProxy %s - user interrupt" %(self.actionname))
self.server_run = False
print("ActionProxy %s quit" %(self.actionname))
def quit_server(self):
self.server_run = False
# to be defined by specific ActionProxy class
def action_thread(self, params):
pass
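# A minimal sketch of a concrete proxy built on top of ActionProxy, assuming the
# topic format described in the module docstring. The class name "WaitActionProxy"
# and its parameter handling are illustrative only, not part of pnp_ros itself.
#
# class WaitActionProxy(ActionProxy):
#     def action_thread(self, params):
#         # "wait_10.start" -> params == "10": wait that many seconds, polling
#         # self.do_run so an 'interrupt'/'end' command can stop the wait early
#         duration = float(params) if params else 1.0
#         t0 = time.time()
#         while self.do_run and time.time() - t0 < duration:
#             time.sleep(0.1)
#
# if __name__ == '__main__':
#     WaitActionProxy('wait').run_server()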
|
c31.py
|
"""
Implement and break HMAC-SHA1 with an artificial timing leak
"""
import sys # isort:skip
from pathlib import Path # isort:skip
sys.path.append(str(Path(__file__).parent.resolve().parent))
import logging
from secrets import token_bytes
from threading import Thread
from time import sleep, time
import requests
from flask import Flask, Response, request
from set4.c28 import sha1
# Shut Flask and Werkzeug up.
wzlogger = logging.getLogger("werkzeug")
wzlogger.disabled = True
def hmac_sha1(key: bytes, message: bytes):
block_size = 64
if len(key) > block_size:
key = sha1(key)
# zero-pad the (possibly hashed) key up to the block size
if len(key) < block_size:
key += b"\x00" * (block_size - len(key))
o_key_pad = bytes([k ^ 0x5C for k in key])
i_key_pad = bytes([k ^ 0x36 for k in key])
return sha1(o_key_pad + sha1(i_key_pad + message))
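# Sanity-check sketch (not part of the challenge): if the sha1() imported from
# set4.c28 matches hashlib's SHA-1, hmac_sha1 above should agree with the standard
# library for any key length. Kept commented out so nothing runs at import time.
#
# import hashlib
# import hmac as std_hmac
# _k, _m = b"key", b"The quick brown fox jumps over the lazy dog"
# assert hmac_sha1(_k, _m) == std_hmac.new(_k, _m, hashlib.sha1).digest()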
def start_webserver(comparison_function, secret_key):
def inner():
app = Flask("vulnerable hmac server :(")
@app.route("/test", methods=["POST"])
def recv_file():
file = bytes.fromhex(request.args["file"])
user_sig = bytes.fromhex(request.args["signature"])
correct_sig = hmac_sha1(secret_key, file)
if comparison_function(user_sig, correct_sig):
return Response("1", 200)
return Response("0", 500)
app.run(debug=True, use_reloader=False)
return inner
def insecure_compare(sig1, sig2):
if len(sig1) != len(sig2):
return False
for c1, c2 in zip(sig1, sig2):
sleep(0.05)
if c1 != c2:
return False
return True
def crack_mac_for_any_file(file):
print("\nCracking MAC...")
mac = b""
for _ in range(20):
times = []
for byte in [bytes([i]) for i in range(256)]:
padding = b"\x00" * (20 - (len(mac) + 1))
start_time = time()
r = requests.post(
"http://localhost:5000/test",
params={"file": file.hex(), "signature": (mac + byte + padding).hex()},
)
end_time = time()
# Allow for some error.
times.append((byte, end_time - start_time))
byte, longest_time = sorted(times, key=lambda v: v[1], reverse=True)[0]
assert longest_time > (len(mac) + 1.5) * 0.05
print(f"Found a byte of the mac: {byte.hex()}")
mac += byte
assert r.status_code == 200 # Assert that the last MAC was valid.
return mac
if __name__ == "__main__":
secret_key = token_bytes(64)
print("Starting webserver.")
Thread(target=start_webserver(insecure_compare, secret_key)).start()
sleep(1) # Give the webserver time to spin up...
file = token_bytes(24)
print("\nThe file is:")
print(file)
print("\nThe secret key is:")
print(secret_key.hex())
print("\nThe MAC is:")
print(hmac_sha1(secret_key, file).hex())
mac = crack_mac_for_any_file(file)
print("\nFound full MAC:")
print(mac.hex())
|
SetAlertToFriends.py
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from wxpy import *
from requests import get
from requests import post
from platform import system
from os import chdir
from random import choice
from threading import Thread
import configparser
import time
import sys
# Fetch the daily motivational quote
def get_message():
r = get("http://open.iciba.com/dsapi/")
note = r.json()['note']
content = r.json()['content']
return note,content
# Send a message to her
def send_message(your_message):
try:
# Her WeChat nickname
my_friend = bot.friends().search(my_lady_wechat_name)[0]
# Send the message to her
my_friend.send(your_message)
except:
# If something goes wrong, notify the File Transfer Assistant
bot.file_helper.send(u"The girlfriend-guarding bot ran into a problem, go check what happened~")
# Send caring messages at the scheduled times
def start_care():
# The content to send; start empty
message = ""
# Loop forever so she is cared for around the clock
while(True):
# Progress hint
print("Guarding, time: %s"% time.ctime())
# Scheduled daily greetings: wake-up, lunch, dinner, bedtime
# Get the time, hour and minute only, which sit at positions -13 to -8 of time.ctime()
now_time = time.ctime()[-13:-8]
if (now_time == say_good_morning):
# Pick a random greeting
message = choice(str_list_good_morning)
# Optionally append a random emoticon
if(flag_wx_emoj):
message = message + choice(str_list_emoj)
send_message(message)
print("Reminded girlfriend to get up: %s" % time.ctime())
elif (now_time == say_good_lunch):
message = choice(str_list_good_lunch)
# Optionally append a random emoticon
if(flag_wx_emoj):
message = message + choice(str_list_emoj)
send_message(message)
print("Reminded girlfriend to have lunch: %s" % time.ctime())
elif (now_time == say_good_dinner):
message = choice(str_list_good_dinner)
# Optionally append a random emoticon
if(flag_wx_emoj):
message = message + choice(str_list_emoj)
send_message(message)
print("Reminded girlfriend to have dinner: %s" % time.ctime())
elif (now_time == say_good_dream):
# Optionally append the daily English sentence at the end
if(flag_learn_english):
note, content = get_message()
message = choice(str_list_good_dream) + "\n\n" + "By the way, let's learn some English:\n" + "Original: " + content + "\n\nTranslation: " + note
else:
message = choice(str_list_good_dream)
# Optionally append a random emoticon
if(flag_wx_emoj):
message = message + choice(str_list_emoj)
send_message(message)
print("Reminded girlfriend to go to bed: %s" % time.ctime())
# Holiday greetings
festival_month = time.strftime('%m', time.localtime())
festival_day = time.strftime('%d', time.localtime())
if(festival_month == '02' and festival_day == '14' and now_time == "08:00"):
send_message(str_Valentine)
print("Sent Valentine's Day wishes: %s" % time.ctime())
elif(festival_month == '03' and festival_day == '08' and now_time == "08:00"):
send_message(str_Women)
print("Sent International Women's Day wishes: %s" % time.ctime())
elif(festival_month == '12' and festival_day == '24' and now_time == "00:00"):
send_message(str_Christmas_Eve)
print("Sent Christmas Eve wishes: %s" % time.ctime())
elif(festival_month == '12' and festival_day == '25' and now_time == "00:00"):
send_message(str_Christmas)
print("Sent Christmas wishes: %s" % time.ctime())
# Birthday greeting
if(festival_month == birthday_month and festival_day == birthday_day and now_time == "00:00"):
send_message(str_birthday)
print("Sent birthday wishes: %s" % time.ctime())
# Check once every 60 seconds
time.sleep(60)
if __name__ == "__main__":
# If reading the config file fails, uncomment the line below; usually only needed under PyCharm
# Set the directory of this file as the current working directory
# chdir(sys.path[0])
# Start the WeChat bot, choosing the command automatically based on the operating system
# On Windows or macOS Sierra use bot = Bot()
# On Linux or a macOS terminal use bot = Bot(console_qr=2)
if('Windows' in system()):
# Windows
bot = Bot()
elif('Darwin' in system()):
# MacOSX
bot = Bot()
elif('Linux' in system()):
# Linux
bot = Bot(console_qr=2,cache_path=True)
else:
# Decide for yourself
print("Could not recognize your operating system type, please configure it yourself")
# Read the config file
cf = configparser.ConfigParser()
cf.read("./config.ini",encoding='UTF-8')
# Set your girlfriend's WeChat nickname; remember, this is neither the WeChat ID nor the remark name
my_lady_wechat_name = cf.get("configuration", "my_lady_wechat_name")
# Set the wake-up time, lunch time, dinner time and bedtime
say_good_morning = cf.get("configuration", "say_good_morning")
say_good_lunch = cf.get("configuration", "say_good_lunch")
say_good_dinner = cf.get("configuration", "say_good_dinner")
say_good_dream = cf.get("configuration", "say_good_dream")
# Set your girlfriend's birthday information
# Month, padded to two digits, e.g. June must be written as 06
birthday_month = cf.get("configuration", "birthday_month")
# Day of month, padded to two digits, e.g. the 6th must be written as 06
birthday_day = cf.get("configuration", "birthday_day")
# Read the random greeting lists for wake-up, lunch, dinner and bedtime
# The code here normally does not need changes; to add greetings, edit the corresponding text files
# Wake-up greeting list, sourced from Sina Weibo
str_list_good_morning = ''
with open("./remind_sentence/sentence_good_morning.txt", "r",encoding='UTF-8') as f:
str_list_good_morning = f.readlines()
print(str_list_good_morning)
# Lunch greeting list, sourced from Sina Weibo
str_list_good_lunch = ''
with open("./remind_sentence/sentence_good_lunch.txt", "r",encoding='UTF-8') as f:
str_list_good_lunch = f.readlines()
print(str_list_good_lunch)
# Dinner greeting list, sourced from Sina Weibo
str_list_good_dinner = ''
with open("./remind_sentence/sentence_good_dinner.txt", "r",encoding='UTF-8') as f:
str_list_good_dinner = f.readlines()
print(str_list_good_dinner)
# Bedtime greeting list, sourced from Sina Weibo
str_list_good_dream = ''
with open("./remind_sentence/sentence_good_dream.txt", "r",encoding='UTF-8') as f:
str_list_good_dream = f.readlines()
print(str_list_good_dream)
# Whether to append the daily English sentence to the bedtime greeting
# False means no, True means yes
if((cf.get("configuration", "flag_learn_english")) == '1'):
flag_learn_english = True
else:
flag_learn_english = False
print(flag_learn_english)
# Whether to append an emoticon to the end of every greeting
# False means no, True means yes
str_emoj = "(•‾̑⌣‾̑•)✧˖°----(๑´ڡ`๑)----(๑¯ิε ¯ิ๑)----(๑•́ ₃ •̀๑)----( ∙̆ .̯ ∙̆ )----(๑˘ ˘๑)----(●′ω`●)----(●・̆⍛・̆●)----ಥ_ಥ----_(:qゝ∠)----(´;ω;`)----( `)3')----Σ((( つ•̀ω•́)つ----╰(*´︶`*)╯----( ´´ิ∀´ิ` )----(´∩`。)----( ื▿ ื)----(。ŏ_ŏ)----( •ิ _ •ิ )----ヽ(*΄◞ิ౪◟ิ‵ *)----( ˘ ³˘)----(; ´_ゝ`)----(*ˉ﹃ˉ)----(◍'౪`◍)ノ゙----(。◝‿◜。)----(ಠ .̫.̫ ಠ)----(´◞⊖◟`)----(。≖ˇェˇ≖。)----(◕ܫ◕)----(`◕‸◕´+)----(▼ _ ▼)----( ◉ืൠ◉ื)----ㄟ(◑‿◐ )ㄏ----(●'◡'●)ノ♥----(。◕ˇ∀ˇ◕)----( ◔ ڼ ◔ )----( ´◔ ‸◔`)----(☍﹏⁰)----(♥◠‿◠)----ლ(╹◡╹ლ )----(๑꒪◞౪◟꒪๑)"
str_list_emoj = str_emoj.split('----')
if ((cf.get("configuration", "flag_wx_emoj")) == '1'):
flag_wx_emoj = True
else:
flag_wx_emoj = False
print(str_list_emoj)
# Set the holiday greetings
# Valentine's Day greeting
str_Valentine = cf.get("configuration", "str_Valentine")
print(str_Valentine)
# International Women's Day greeting
str_Women = cf.get("configuration", "str_Women")
print(str_Women)
# Christmas Eve greeting
str_Christmas_Eve = cf.get("configuration", "str_Christmas_Eve")
print(str_Christmas_Eve)
# Christmas greeting
str_Christmas = cf.get("configuration", "str_Christmas")
print(str_Christmas)
# Greeting for her birthday
str_birthday = cf.get("configuration", "str_birthday")
print(str_birthday)
# Start guarding the girlfriend
t = Thread(target=start_care, name='start_care')
t.start()
# Listener for messages from the girlfriend
# Girlfriend's WeChat nickname
my_girl_friend = bot.friends().search(my_lady_wechat_name)[0]
@bot.register(chats=my_girl_friend, except_self=False)
def print_others(msg):
# Print the chat content
print(msg.text)
# Word segmentation / sentiment analysis could be done with snownlp or jieba, but that makes the packaged file too large, so it is not used for now
# Instead, simply call a web API
# and do an extremely simple sentiment analysis
# The result is for reference only; do not trust it completely
postData = {'data':msg.text}
response = post('https://bosonnlp.com/analysis/sentiment?analysisType=',data=postData)
data = response.text
# Sentiment score (the closer to 1, the better the mood; the closer to 0, the worse)
now_mod_rank = (data.split(',')[0]).replace('[[','')
print("Message from girlfriend: %s\nCurrent sentiment score: %s\nCloser to 1 means a better mood, closer to 0 a worse one; the result is for reference only, do not trust it completely!\n\n" % (msg.text, now_mod_rank))
# Send the information to the File Transfer Assistant
mood_message = u"Message from girlfriend: " + msg.text + "\nCurrent sentiment score: " + now_mod_rank + "\nCloser to 1 means a better mood, closer to 0 a worse one; the result is for reference only, do not trust it completely!\n\n"
bot.file_helper.send(mood_message)
|
ntds_parser.py
|
import sys, re, itertools, time
from binascii import hexlify
from threading import Thread, Event
from impacket.examples.secretsdump import LocalOperations, RemoteOperations, NTDSHashes
from impacket.smbconnection import SMBConnection, SessionError
from socket import error as socket_error
def process_remote(username, password, target, historic):
hashes = list()
print("Attempting to connect to {}...".format(target))
try:
connection = SMBConnection(target, target)
connection.login(username, password, "", "", "")
ops = RemoteOperations(connection, False, None)
ops.setExecMethod("smbexec")
stopper = Event()
spinner = Thread(target=__update, args=(stopper, hashes))
spinner.start()
NTDSHashes(None, None, isRemote=True, remoteOps=ops, noLMHash=True, useVSSMethod=False,
justNTLM=True, printUserStatus=True, history=historic, lastLogon=True, pwdLastSet=True,
perSecretCallback=lambda type, secret: hashes.append(__process_hash(secret))).dump()
stopper.set()
spinner.join()
if len(hashes) == 0:
raise Exception("Extraction seemingly finished successfully but I didn't find any hashes...")
return __get_domain(hashes), hashes
except socket_error:
raise Exception("Failed to connect to {}".format(target))
except SessionError as e:
if e.error == 3221225581:
raise Exception("Username or password incorrect - please try again.")
def process_local(system, ntds, historic):
hashes = list()
print("Attempting to grab decryption key...")
ops = LocalOperations(system)
try:
bootKey = ops.getBootKey()
except:
raise Exception("Failed to retrieve decryption key. Ensure your SYSTEM hive is correct.")
print("Found key: 0x{0}.".format(hexlify(bootKey)))
stopper = Event()
spinner = Thread(target=__update, args=(stopper, hashes))
spinner.start()
NTDSHashes(ntds, bootKey, noLMHash=ops.checkNoLMHashPolicy(), useVSSMethod=True, justNTLM=True,
printUserStatus=True, history=historic, pwdLastSet=True,
perSecretCallback=lambda type, secret: hashes.append(__process_hash(secret))).dump()
stopper.set()
spinner.join()
return __get_domain(hashes), hashes
def __process_hash(hash):
user, rid, lmhash, nthash, pwdLastSet, enabled, lastLogon = re.findall("(?P<user>.*):(?P<rid>.*):(?P<lmhash>.*):(?P<ntlmhash>.*):::(?:(?: \(pwdLastSet=(?P<pwdLastSet>.*)\))(?: \(status=(?P<enabled>.*)\))(?: \(lastLogon=(?P<lastLogon>.*)\)))?", hash)[0]
history_match = re.match("(?P<user>.*)(_history\d+$)", user)
if history_match:
user = history_match.group(1)
return {"username": user.strip(), "ntlmhash": nthash, "historic": True}
else:
return {"username": user.strip(), "ntlmhash": nthash, "enabled": True if enabled == "Enabled" else False, "passwordLastSet": pwdLastSet, "lastLogon": lastLogon}
def __get_domain(hashes):
return [hash["username"].split("\\")[0] for hash in hashes if "\\" in hash["username"]][0]
def __update(stopper, hashes):
spinner = itertools.cycle(['-', '/', '|', '\\'])
while not stopper.is_set():
sys.stdout.write("[" + next(spinner) + "] (" + str(len(hashes)) + ") Finding and extracting hashes - this might take a few minutes... \r")
sys.stdout.flush()
time.sleep(0.2)
|
enqueuer_thread.py
|
# coding=utf-8
"""Given the dataset object, make a multithread enqueuer"""
import os
import queue
import threading
import contextlib
import multiprocessing
import time
import random
import sys
import utils
import traceback
# for video queuer
from nn import resizeImage
import cv2
# modified from keras
class DatasetEnqueuer(object):
def __init__(self, dataset, prefetch=5, num_workers=1,
start=True, # start the dataset get thread when init
shuffle=False,
# whether to break down each mini-batch for each gpu
is_multi_gpu=False,
last_full_batch=False, # make sure the last batch is full
):
self.dataset = dataset
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * dataset.batch_size)
self.is_multi_gpu = is_multi_gpu
self.last_full_batch = last_full_batch
self.workers = num_workers
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
self.cur_batch_count = 0
self.shuffle = shuffle
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
#print("stop called")
if self.is_running():
self._stop()
def _stop(self):
#print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
batch_idxs = list(self.dataset.valid_idxs) * self.dataset.num_epochs
if self.shuffle:
batch_idxs = random.sample(batch_idxs, len(batch_idxs))
if self.last_full_batch:
# make sure the batch_idxs are multiplier of batch_size
batch_idxs += [batch_idxs[-1] for _ in range(
self.dataset.batch_size - len(batch_idxs) % self.dataset.batch_size)]
while True:
with contextlib.closing(
multiprocessing.pool.ThreadPool(self.workers)) as executor:
for idx in batch_idxs:
if self.stop_signal.is_set():
return
# block until not full
self.queue.put(
executor.apply_async(self.dataset.get_sample, (idx,)), block=True)
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.dataset.num_batches:
self._stop()
return
samples = []
for i in range(self.dataset.batch_size):
# first get got the ApplyResult object,
# then second get to get the actual thing (block till get)
sample = self.queue.get(block=True).get()
self.queue.task_done()
samples.append(sample)
# break the mini-batch into mini-batches for multi-gpu
if self.is_multi_gpu:
batches = []
# a list of [frames, boxes, labels_arr, ori_boxes, box_keys]
this_batch_idxs = range(len(samples))
# pack these batches for each gpu
this_batch_idxs_gpus = utils.grouper(
this_batch_idxs, self.dataset.batch_size_per_gpu)
for this_batch_idxs_per_gpu in this_batch_idxs_gpus:
batches.append(self.dataset.collect_batch(
samples, this_batch_idxs_per_gpu))
batch = batches
else:
batch = self.dataset.collect_batch(samples)
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
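# Minimal usage sketch for DatasetEnqueuer. The `dataset` object is assumed to expose
# the attributes the enqueuer reads (valid_idxs, num_epochs, batch_size, num_batches,
# batch_size_per_gpu when is_multi_gpu, get_sample() and collect_batch()):
#
#   enqueuer = DatasetEnqueuer(dataset, prefetch=5, num_workers=4, shuffle=True)
#   for batch in enqueuer.get():
#       ...  # feed the batch to the model
#   enqueuer.stop()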
def count_frame_get(total_frame, frame_gap):
count = 0
cur_frame = 0
while cur_frame < total_frame:
if cur_frame % frame_gap != 0:
cur_frame += 1
continue
count += 1
cur_frame += 1
return count
class VideoEnqueuer(object):
def __init__(self,
cfg,
vcap,
num_frame,
frame_gap=1,
prefetch=5,
start=True, # start the dataset get thread when init
is_moviepy=False,
batch_size=4,
):
self.cfg = cfg
self.vcap = vcap
self.num_frame = num_frame
self.frame_gap = frame_gap
self.is_moviepy = is_moviepy
self.batch_size = batch_size
self.prefetch = prefetch # how many batch to save in queue
self.max_queue_size = int(self.prefetch * batch_size)
self.queue = None
self.run_thread = None # the thread to spawn others
self.stop_signal = None
# how many frames we are actually gonna get due to frame gap
self.get_num_frame = count_frame_get(self.num_frame, self.frame_gap)
# compute the number of batches we gonna get so we know when to stop and exit
# last batch is not enough batch_size
self.num_batches = self.get_num_frame // batch_size + \
int(self.get_num_frame % batch_size != 0)
self.cur_batch_count = 0
if start:
self.start()
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self):
self.queue = queue.Queue(self.max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def stop(self):
#print("stop called")
if self.is_running():
self._stop()
def _stop(self):
#print("_stop called")
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(0)
def __del__(self):
if self.is_running():
self._stop()
# thread to start getting batches into queue
def _run(self):
cfg = self.cfg
frame_count = 0
while frame_count < self.num_frame:
if self.stop_signal.is_set():
return
if self.is_moviepy:
suc = True
frame = next(self.vcap)
else:
suc, frame = self.vcap.read()
if not suc:
frame_count += 1
continue
if frame_count % self.frame_gap != 0:
frame_count += 1
continue
# process the frames
if self.is_moviepy:
# moviepy ask ffmpeg to get rgb24
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
im = frame.astype("float32")
resized_image = resizeImage(im, cfg.short_edge_size, cfg.max_size)
scale = (resized_image.shape[0] * 1.0 / im.shape[0] + \
resized_image.shape[1] * 1.0 / im.shape[1]) / 2.0
self.queue.put((resized_image, scale, frame_count), block=True)
frame_count += 1
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# iterator to get batch from the queue
def get(self):
if not self.is_running():
self.start()
try:
while self.is_running():
if self.cur_batch_count == self.num_batches:
self._stop()
return
batch_size = self.batch_size
# last batch
if (self.cur_batch_count == self.num_batches - 1) and (
self.get_num_frame % batch_size != 0):
batch_size = self.get_num_frame % batch_size
samples = []
for i in range(batch_size):
sample = self.queue.get(block=True)
self.queue.task_done()
samples.append(sample)
batch = samples
self.cur_batch_count += 1
yield batch
except Exception as e: # pylint: disable=broad-except
self._stop()
_type, _value, _traceback = sys.exc_info()
print("Exception in enqueuer.get: %s" % e)
traceback.print_tb(_traceback)
raise Exception
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
|
tf_util.py
|
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
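# Worked example with delta=1.0: for |x| < delta the quadratic branch applies, so
# huber_loss(0.5) = 0.5 * 0.5**2 = 0.125; for |x| >= delta the linear branch applies,
# so huber_loss(2.0) = 1.0 * (2.0 - 0.5 * 1.0) = 1.5.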
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.1
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_outputs=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
self.input_names = {inp.name.split("/")[-1].split(":")[0]: inp for inp in inputs}
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args, **kwargs):
assert len(args) + len(kwargs) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
for inpt_name, value in kwargs.items():
self._feed_input(feed_dict, self.input_names[inpt_name], value)
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
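# Sketch of how GetFlat/SetFromFlat are typically paired (e.g. for conjugate-gradient
# style updates). `var_list` is assumed to be a list of tf.Variable objects that
# already exist in the default graph, with a session created and initialized:
#
#   get_flat = GetFlat(var_list)
#   set_from_flat = SetFromFlat(var_list)
#   theta = get_flat()             # flat 1-D numpy vector of all parameters
#   set_from_flat(theta * 0.99)    # write a modified vector back into the variables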
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
# NOTE: the early return below disables the shape check entirely; the code that
# follows is kept for reference but never runs.
return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
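# Example: for a placeholder of shape [None, 4], placeholder_shape becomes [-1, 4],
# so adjust_shape(ph, [1, 2, 3, 4]) reshapes the flat list to a (1, 4) array before
# it is fed. (The compatibility check itself is currently disabled, see _check_shape.)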
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
build_config.py
|
#!/usr/bin/env python3
"""Utilities for generating/processing logger configurations.
Typical use would be something like this:
import pprint
import threading
from logger.utils.build_config import BuildConfig
from logger.utils.read_json import read_json
from logger.listener.listen import ListenerFromLoggerConfig
vars = {
"%CRUISE%" : "NBP1700",
"%INST%" : ["knud", "seap", "grv1", "gyr1"]
}
generic_templates = {
"%INST%_SERIAL_READER": {
"class": "SerialReader",
"kwargs": {
"port": "/tmp/tty_%INST%",
"baudrate": 9600
}
},
"%INST%_LOGFILE_WRITER": {
"class": "LogfileWriter",
"kwargs": {"filebase": "/tmp/logs/%CRUISE%/%INST%/raw/%CRUISE%_%INST%"}
}
}
logger_template = {
# A generic logger composed of the above pieces
"%INST%_SERIAL_LOGGER": {
"readers": "%INST%_SERIAL_READER",
"transforms": {"class": "TimestampTransform"},
"writers": "%INST%_LOGFILE_WRITER"
}
}
logger_configs = BuildConfig.expand_template(vars, logger_template,
generic_templates)
pprint.pprint(logger_configs)
for logger in logger_configs:
listener = ListenerFromLoggerConfig(logger_configs[logger])
threading.Thread(target=listener.run).start()
"""
import json
import logging
import pprint
import sys
from collections import OrderedDict
sys.path.append('.')
from logger.utils.read_json import read_json
from logger.listener.listen import ListenerFromLoggerConfig
################################################################################
class BuildConfig:
"""A container class of class methods"""
############################
@classmethod
def _recursive_str_replace(self, source, old_str, new_str):
"""Recurse through a source composed of lists, dicts, tuples and
strings returning a copy of the source where string replace has been
applied to all strings, replacing old_str with new_str. If any
unrecognized elements are encountered (classes, functions, etc.)
they are returned unexplored."""
# Some type checking up front, so we don't have to do it down in the weeds
if not type(old_str) is str:
raise ValueError('recursive_str_replace: value of old_str is not '
'str: %s' % old_str)
if type(new_str) is list:
for elem in new_str:
if not type(elem) is str:
raise ValueError('recursive_str_replace: value of new_str must be '
'either a string or a list of strings: %s' % new_str)
elif type(new_str) is not str:
raise ValueError('recursive_str_replace: value of new_str must be '
'either a string or a list of strings: %s' % new_str)
# Start in on replacements. If source is a string, just do the
# string replacement. We shouldn't find ourselves in a situation
# where source is a str and new_str is a list.
if type(source) is str:
if not source.find(old_str) > -1:
return source
elif type(new_str) is str:
return source.replace(old_str, new_str)
else:
raise ValueError('recursive_str_replace: when source ("%s") is a str '
'new_str ("%s") must also be a str.'
% (source, new_str))
# Source is a list.
elif type(source) is list:
# If new_str is a simple string, just do replacement recursively
if type(new_str) is str:
return [self._recursive_str_replace(s, old_str, new_str)
for s in source]
# Else new_str is a list; do replacements of each element, and insert
# them into the present list
else:
new_list = []
for elem in source:
# For this element, we're going to create a new element for
# each value in new_str, but we're only going to keep the
# distinct elements.
new_elem_list = []
for replacement in new_str:
new_elem = self._recursive_str_replace(elem, old_str, replacement)
if not new_elem in new_elem_list:
new_elem_list.append(new_elem)
new_list += new_elem_list
return new_list
# If it's a tuple, just treat it as a list, expand, then coerce it back
elif type(source) is tuple:
return tuple(self._recursive_str_replace(list(source), old_str, new_str))
# If it's a dict, do replacements of each entry
elif type(source) is dict:
# If new_str is a simple string, just do replacement recursively
if type(new_str) is str:
return {k.replace(old_str, new_str):
self._recursive_str_replace(v, old_str, new_str)
for k, v in source.items()}
# Else new_str is a list; do replacements of each element, and insert
# them into the present dict.
else:
new_dict = {}
for key, value in source.items():
# We count on key being a str
if key.find(old_str) > -1:
for replacement in new_str:
new_key = key.replace(old_str, replacement)
new_value = self._recursive_str_replace(value, old_str,
replacement)
new_dict[new_key] = new_value
else:
new_dict[key] = self._recursive_str_replace(value, old_str, new_str)
return new_dict
# If it's anything else, we don't know what to do with it - just
# return it untouched.
else:
return source
#############################
@classmethod
def _recursive_replace(self, struct, reps):
"""Recurse through a structure composed of lists, dicts, tuples and
strings returning a copy of the structure where the following
transform has been applied: If element 'elem' appears in struct
(other than as a dict key) and if it also appears as a key in the
dictionary 'reps', replace it with the corresponding value from
reps. If any unrecognized elements are encountered (classes,
functions, etc.) they are returned explored.
"""
if not type(reps) is dict:
raise TypeError('Parameter "reps" must be a dict in _recursive_replace; '
'instead found "%s"' % reps)
if type(struct) is list:
return [self._recursive_replace(s, reps) for s in struct]
elif type(struct) is tuple:
return tuple([self._recursive_replace(s, reps) for s in struct])
elif type(struct) is dict:
logging.debug('recursing on dictionary: "%s"', str(struct))
for k,v in struct.items():
logging.debug(' %s: %s', str(k), str(v))
return {k: self._recursive_replace(v, reps) for k, v in struct.items()}
else:
logging.debug('Testing "%s" against %s', str(struct), list(reps.keys()))
if struct in reps:
logging.debug('Replacing "%s" with "%s"', str(struct), str(reps[struct]))
return reps[struct]
else:
return struct
############################
@classmethod
def expand_template(self, vars, template, source_template=None):
"""Expand the definitions in template with the definitions in
source_template, then swap in the variables. Return a new,
expanded template dict. If source_template is omitted, template
will be expanded with its own definitions."""
# Expand any vars embedded inside the vars values themselves
new_vars = {}
for k, v in vars.items():
for old_val, new_val in vars.items():
v = self._recursive_str_replace(v, old_val, new_val)
new_vars[k] = v
# Use expanded variables to fill in template and source_template
if not source_template:
source_template = template
for old_value, new_value in new_vars.items():
template = self._recursive_str_replace(template, old_value, new_value)
source_template = self._recursive_str_replace(source_template,
old_value, new_value)
# Finally, expand all the internal definitions
return self._recursive_replace(template, source_template)
############################
@classmethod
def expand_config(self, config):
"""A full configuration is a dict with a "modes" key that itself
contains a dict. Each key is the name of a cruise mode, and the
corresponding value is (yet another) dict mapping logger names to
the configuration that logger should have in that mode.
An optional top-level "default_mode" key maps to the name of the
default cruise mode that the system should start in if not other
information is available.
{
"modes": {
"off": {},
"port": {
"seap": {...},
"knud": {...},
...
},
"underway": {
"seap": {...},
"knud": {...},
...
},
...
},
"default_mode": "off"
}
A config may also have addition keys:
vars - a dict of old_str -> new_str mappings that will be applied
via recursive string replacement to the modes dict during
expansion. Values in vars may refer to other keys in the
dict, but there is not protection against circular references.
"vars": {
"%CRUISE%": "NBP1700",
"%INST%": ["seap", "knud", "gyr1"]
}
templates - a set of (usually generic) configuration definitions that
may be substituted by reference into the modes dictionary during
expansion.
"templates": {
"%INST%_SERIAL_READER": {
"class": "SerialReader",
"kwargs": {
"port": "/tmp/tty_%INST%",
"baudrate": 9600
}
},
"%INST%_LOGFILE_WRITER": {
"class": "LogfileWriter",
"kwargs": {"filebase": "/logs/%CRUISE%/%INST%/raw/%CRUISE%_%INST%"}
},
# A generic logger composed of the above pieces
"%INST%_SERIAL_LOGGER": {
"readers": "%INST%_SERIAL_READER",
"transforms": {"class": "TimestampTransform"},
"writers": "%INST%_LOGFILE_WRITER"
}
}
"""
# First, expand the templates without variables, then expand modes
# using the vars and expanded templates
vars = config.get('vars', {})
templates = config.get('templates', {})
expanded_templates = self.expand_template(vars, templates, templates)
# Create a new config dict and swap in expanded bits
#new_config = config
new_config = OrderedDict()
cruise = config.get('cruise', {})
if cruise:
new_config['cruise'] = self.expand_template(vars, cruise,
expanded_templates)
loggers = config.get('loggers', {})
if loggers:
new_config['loggers'] = self.expand_template(vars, loggers,
expanded_templates)
modes = config.get('modes', {})
if modes:
new_config['modes'] = self.expand_template(vars, modes,
expanded_templates)
default_mode = config.get('default_mode', {})
if default_mode:
new_config['default_mode'] = self.expand_template(vars, default_mode,
expanded_templates)
configs = config.get('configs', {})
if configs:
new_config['configs'] = self.expand_template(vars, configs,
expanded_templates)
return new_config
################################################################################
def validate_config(config):
modes = config.get('modes', None)
if not modes:
logging.error('No modes found in configuration')
default_mode = config.get('default_mode', None)
if default_mode:
if not default_mode in modes:
logging.error('Default mode "%s" not found in modes: %s',
default_mode, modes)
else:
logging.warning('No default mode found in configuration')
# Go through each logger in each mode and see if we can instantiate it
for mode_name, loggers in modes.items():
logging.info('Validating mode: %s', mode_name)
for logger, logger_spec in loggers.items():
logging.info(' Validating logger: %s:%s', mode_name, logger)
try:
listener = ListenerFromLoggerConfig(logger_spec)
except KeyboardInterrupt:
return
except Exception as e:
logging.error('Error validating %s in mode %s: %s',
logger, mode_name, str(e))
################################################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='config', action='store',
help='Name of config file to load and expand')
parser.add_argument('--validate', dest='validate', action='store_true',
help='Verify that the output is a fully-formed cruise '
'configuration')
parser.add_argument('-v', '--verbosity', dest='verbosity',
default=0, action='count',
help='Increase output verbosity')
args = parser.parse_args()
LOGGING_FORMAT = '%(asctime)-15s %(filename)s:%(lineno)d %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
LOG_LEVELS ={0:logging.WARNING, 1:logging.INFO, 2:logging.DEBUG}
args.verbosity = min(args.verbosity, max(LOG_LEVELS))
logging.getLogger().setLevel(LOG_LEVELS[args.verbosity])
config_json = read_json(args.config)
expanded_config = BuildConfig.expand_config(config_json)
if args.validate:
validate_config(expanded_config)
print(json.dumps(expanded_config, indent=4))
|
io_eth.py
|
"""--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file udp_socket.py
\brief This module contains a threaded ethernet UDP communication driver
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
import select
import socket
import threading
import os
class IoEthThread(object):
def __init__(self,remote_address,tx_queue,rx_queue,max_packet_size=1500):
self.tx_queue = tx_queue
self.rx_queue = rx_queue
self.max_packet_size = max_packet_size
self.remote_address = remote_address
"""
Initialize the UDP connection
"""
try:
self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.conn.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.conn.setblocking(0)
self.conn.bind((os.environ['ROS_IP'],self.remote_address[1]))
self.conn.connect(self.remote_address)
except:
try:
self.conn.Close()
except:
pass
self.link_up = False
return
self.need_to_terminate = False
self.listen_terminate_mutex = threading.RLock()
self.transmit_terminate_mutex = threading.RLock()
self.listenThread = threading.Thread(target = self.listen)
self.transmitThread = threading.Thread(target = self.transmit)
self.listenThread.start()
self.transmitThread.start()
self.link_up = True
def __del__(self):
with self.listen_terminate_mutex, self.transmit_terminate_mutex:
self.need_to_terminate = True
assert(self.listenThread)
assert(self.transmitThread)
self.listenThread.join()
self.transmitThread.join()
def listen(self):
while True:
with self.listen_terminate_mutex:
if self.need_to_terminate:
break
result = select.select([self.conn],[],[],0.1)
if (len(result[0])>0):
message = result[0][0].recv(self.max_packet_size)
message_bytes= map(ord, message)
self.rx_queue.put(message_bytes)
def transmit(self):
while True:
with self.listen_terminate_mutex:
if self.need_to_terminate:
break
result = select.select([self.tx_queue._reader],[],[],0.1)
if (len(result[0])>0):
data = result[0][0].recv()
message_bytes=[chr(i) for i in data]
message_bytes = ''.join(message_bytes)
self.conn.send(message_bytes)
def Close(self):
self.__del__()
self.conn.close()
self.link_up = False
|
trainer_base.py
|
"""Base class of trainers
Explanation of batched n-step training and arguments:
1. Rollout:
The basic unit of training is a length-L "rollout" of the form
{ s_t, a_t, r_t, s_{t+1}, ..., s_{t+L} }
which contains L transitions. In practice, L is not always a fixed length
as a rollout must terminate at the end of an episode.
Argument 'rollout_maxlen' is the maximum length a rollout can ever have,
and is related with the number of bootstrap steps. For example, setting
'rollout_maxlen = 1' corresponds to 1-step bootstrapped TD learning.
If we set 'rollout_maxlen = N', then the first state in the rollout will be
subject to a N-step TD learning, the second state will be subject to
a (N-1)-step TD learning, and so on.
2. Rollout list:
"rollout_list" (abbr. "rlist") is a list of (various-length) rollouts
and is guaranteed to contain a fixed number of transitions.
Argument 'rollout_maxlen' is also this fixed number.
3. Batch:
A "batch" is simply a fixed number of rollout lists. One training on
a single batch executes exactly one update to the network weights.
Argument 'batch_size' is the number of rollout lists.
"""
from multiprocessing import Process, ProcessError, Event, cpu_count
import socket
import os
import signal
import time
from datetime import timedelta
import tensorflow as tf
import numpy as np
from drlbox.layer.noisy_dense import NoisyDenseIG, NoisyDenseFG
from drlbox.net.kfac.optimizer import KfacOptimizerTV
from drlbox.net.kfac.build_layer_collection import build_layer_collection
from drlbox.common.replay import Replay, PriorityReplay
from drlbox.common.util import discrete_action, continuous_action
from drlbox.common.tasker import Tasker
from drlbox.trainer.step_counter import StepCounter
from drlbox.trainer.rollout import Rollout
from drlbox.common.namescope import TF_NAMESCOPE
LOCALHOST = 'localhost'
JOBNAME = 'local'
'''
Optimizer related default kwargs
'''
ADAM_KWARGS = dict(
learning_rate=1e-4,
epsilon=1e-4,
)
KFAC_KWARGS = dict(
learning_rate=1e-4,
cov_ema_decay=0.95,
damping=1e-3,
norm_constraint=1e-3,
momentum=0.0,
)
'''
Replay memory related default kwargs
'''
REPLAY_KWARGS = dict(
maxlen=1000,
minlen=100,
)
'''
Trainer default kwargs
'''
TRAINER_KWARGS = dict(
feature_maker=None,
model_maker=None, # if set, ignores feature_maker
num_parallel=None,
port_begin=2222,
discount=0.99,
train_steps=1000000,
rollout_maxlen=32,
batch_size=1,
online_learning=True, # whether or not to perform online learning
replay_type=None, # None, 'uniform', 'prioritized'
replay_ratio=4,
replay_priority_type='differential', # None, 'error' 'differential'
replay_kwargs={},
optimizer='adam', # 'adam', 'kfac', tf.train.Optimizer instance
opt_clip_norm=40.0,
opt_kwargs={},
kfac_inv_upd_interval=10,
noisynet=None, # None, 'ig', 'fg'
save_dir=None, # directory to save tf.keras models
save_interval=10000,
catch_signal=False, # effective on multiprocessing only
)
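# Illustrative note (added): the defaults above are merged into Trainer.KWARGS below via
# {**Tasker.KWARGS, **TRAINER_KWARGS}, so a concrete trainer subclass could be configured
# e.g. as SomeTrainer(env_maker=make_env, feature_maker=make_feature, num_parallel=4,
# replay_type='uniform', rollout_maxlen=8), where SomeTrainer and the maker callables are
# hypothetical stand-ins rather than drlbox names.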
class Trainer(Tasker):
"""Base class of trainers."""
dense_layer = tf.keras.layers.Dense
KWARGS = {**Tasker.KWARGS, **TRAINER_KWARGS}
def run(self):
"""Run the training process."""
# change default dense_layer to noisy layer if requested
if self.noisynet is None:
pass
elif self.noisynet == 'ig':
self.dense_layer = NoisyDenseIG
self.print('Using independent Gaussian NoisyNet')
elif self.noisynet == 'fg':
self.dense_layer = NoisyDenseFG
self.print('Using factorized Gaussian NoisyNet')
else:
raise ValueError('noisynet={} is invalid'.format(self.noisynet))
if self.num_parallel is None:
self.num_parallel = cpu_count()
self.port_list = [self.port_begin + i
for i in range(self.num_parallel)]
# single process
if self.num_parallel == 1:
self.worker(0)
return
# multiprocess parallel training
print(self.port_list)
for port in self.port_list:
if not port_available(LOCALHOST, port):
raise NameError('port {} is not available'.format(port))
        self.print('Claiming {} ports {} ...'.format(LOCALHOST, self.port_list))
self.event_finished = Event()
self.worker_list = []
try:
for wid in range(self.num_parallel):
worker = Process(target=self.worker, args=(wid,))
worker.start()
self.worker_list.append(worker)
except ProcessError:
self.terminate_workers()
# set handlers if requested
if self.catch_signal:
self.default_sigint_handler = signal.signal(signal.SIGINT,
self.signal_handler)
self.default_sigterm_handler = signal.signal(signal.SIGTERM,
self.signal_handler)
self.print('SIGINT and SIGTERM will be caught by drlbox')
# terminates the entire training when the master worker terminates
wait_counter = 0
start_time = time.time()
while not self.event_finished.is_set():
wait_counter += 1
if wait_counter >= 3000:
wait_counter = 0
elapsed = int(time.time() - start_time)
time_str = str(timedelta(seconds=elapsed))
self.print('Elapsed time:', time_str)
time.sleep(0.1)
self.print('A worker just terminated -- training should end soon')
self.terminate_workers()
# restore default handlers
if self.catch_signal:
signal.signal(signal.SIGINT, self.default_sigint_handler)
signal.signal(signal.SIGTERM, self.default_sigterm_handler)
self.print('SIGINT and SIGTERM default handlers reset to default')
self.print('Asynchronous training has ended')
def signal_handler(self, signum, frame):
"""Signal handler for SIGINT and SIGTERM."""
self.event_finished.set()
def terminate_workers(self):
"""Gracefully terminate workers (in backward order of spawning)."""
for worker in self.worker_list[::-1]:
while worker.is_alive():
worker.terminate()
time.sleep(0.01)
def worker(self, wid):
"""Run a worker process."""
assert callable(self.env_maker)
env = self.env_maker()
# determine action mode from env.action_space
if discrete_action(env.action_space):
self.action_mode = 'discrete'
self.action_dim = env.action_space.n
elif continuous_action(env.action_space):
self.action_mode = 'continuous'
self.action_dim = len(env.action_space.shape)
self.action_low = env.action_space.low
self.action_high = env.action_space.high
else:
raise TypeError('Invalid type of env.action_space')
self.is_master = wid == 0
if self.is_master and self.save_dir is not None:
env_name = 'UnknownEnv-v0' if env.spec is None else env.spec.id
self.output = self.get_output_dir(env_name)
else:
self.output = None
# ports, cluster, and server
cluster_list = ['{}:{}'.format(LOCALHOST, p) for p in self.port_list]
cluster = tf.train.ClusterSpec({JOBNAME: cluster_list})
tf.train.Server(cluster, job_name=JOBNAME, task_index=wid)
self.print('Starting server #{}'.format(wid))
self.setup_algorithm()
# global/local devices
worker_dev = '/job:{}/task:{}/cpu:0'.format(JOBNAME, wid)
rep_dev = tf.train.replica_device_setter(worker_device=worker_dev,
cluster=cluster)
self.setup_nets(worker_dev, rep_dev, env)
if self.replay_type is not None:
replay_kwargs = {**REPLAY_KWARGS, **self.replay_kwargs}
if self.is_master:
self.print_kwargs(replay_kwargs, 'Replay memory arguments')
if self.replay_type == 'uniform':
self.replay = Replay(**replay_kwargs)
elif self.replay_type == 'prioritized':
self.replay = PriorityReplay(**replay_kwargs)
else:
message = 'replay type {} invalid'.format(self.replay_type)
raise ValueError(message)
# begin tensorflow session, build async RL agent and train
port = self.port_list[wid]
with tf.Session('grpc://{}:{}'.format(LOCALHOST, port)) as sess:
sess.run(tf.global_variables_initializer())
self.set_session(sess)
# train the agent
self.train_on_env(env)
if self.num_parallel > 1:
self.event_finished.set()
if self.is_master:
while True:
time.sleep(1)
def train_on_env(self, env):
"""Perform training on a Gym env."""
step = self.step_counter.step_count()
if self.is_master:
last_save = step
self.save_model(step)
state = env.reset()
ep_reward = 0.0
while step <= self.train_steps:
self.sync_to_global()
batch = []
for _ in range(self.batch_size):
rlist = [Rollout(state)]
for rlist_step in range(self.rollout_maxlen):
net_input = self.state_to_input(state)
act_val = self.online_net.action_values([net_input])[0]
action = self.policy.select_action(act_val)
state, reward, done, _ = env.step(action)
ep_reward += reward
rlist[-1].append(state, action, reward, done, act_val)
if done:
state = env.reset()
if rlist_step < self.rollout_maxlen - 1:
rlist.append(Rollout(state))
self.print('episode reward {:5.2f}'.format(ep_reward))
ep_reward = 0.0
if len(rlist[-1]) >= self.rollout_maxlen:
if rlist_step < self.rollout_maxlen - 1:
rlist.append(Rollout(state))
batch.append(rlist)
if self.online_learning:
# on-policy training on the newly collected rollout list
batch_result = self.train_on_batch(batch)
batch_loss_list = [batch_result[0]]
else:
batch_loss_list = []
# off-policy training if there is a memory
if self.replay_type is not None:
if self.replay_type == 'prioritized' and self.online_learning:
self.replay.extend(batch, batch_result[1])
else:
self.replay.extend(batch)
if self.replay.usable():
for _ in range(np.random.poisson(self.replay_ratio)):
batch, index, weight = \
self.replay.sample(self.batch_size)
self.sync_to_global()
if self.replay_type == 'prioritized':
loss, priority = self.train_on_batch(batch, weight)
self.replay.update_priority(index, priority)
else:
loss, = self.train_on_batch(batch)
batch_loss_list.append(loss)
# step, print, etc.
self.step_counter.increment(self.batch_size * self.rollout_maxlen)
step = self.step_counter.step_count()
if self.is_master:
if step - last_save > self.save_interval:
self.save_model(step)
last_save = step
if batch_loss_list:
loss_print = '{:3.3f}'.format(np.mean(batch_loss_list))
else:
loss_print = 'None'
self.print('training step {}/{}, loss {}'
.format(step, self.train_steps, loss_print))
# save at the end of training
if self.is_master:
self.save_model(step)
def build_net(self, env=None, is_global=False):
"""Build a neural net."""
net = self.net_cls()
if self.load_model is not None:
if is_global:
model = self.do_load_model()
self.saved_weights = model.get_weights()
else:
model = self.do_load_model(load_weights=False)
else:
if self.model_maker is None:
assert callable(self.feature_maker)
state, feature = self.feature_maker(env.observation_space)
model = self.build_model(state, feature, **self.model_kwargs)
else:
assert callable(self.model_maker)
model = self.model_maker(env)
net.set_model(model)
if self.noisynet is not None:
net.set_noise_list()
return net
def set_online_optimizer(self):
"""Set optimizer for the online network."""
if self.replay_type == 'prioritized':
opt_rep_kwargs = dict(priority_type=self.replay_priority_type,
batch_size=self.batch_size)
else:
opt_rep_kwargs = {}
if self.optimizer == 'adam':
adam_kwargs = {**ADAM_KWARGS, **self.opt_kwargs}
if self.is_master:
self.print_kwargs(adam_kwargs, 'Adam arguments')
adam = tf.train.AdamOptimizer(**adam_kwargs)
self.online_net.set_optimizer(adam, self.opt_clip_norm,
self.global_net.weights,
**opt_rep_kwargs)
elif self.optimizer == 'kfac':
kfac_kwargs = {**KFAC_KWARGS, **self.opt_kwargs}
if self.is_master:
self.print_kwargs(kfac_kwargs, 'KFAC arguments')
with tf.name_scope(TF_NAMESCOPE):
layer_collection = build_layer_collection(
layer_list=self.online_net.model.layers,
loss_list=self.online_net.kfac_loss_list,
)
kfac = KfacOptimizerTV(**kfac_kwargs,
layer_collection=layer_collection,
var_list=self.online_net.weights)
self.online_net.set_kfac(kfac, self.kfac_inv_upd_interval,
train_weights=self.global_net.weights,
**opt_rep_kwargs)
elif isinstance(self.optimizer, tf.train.Optimizer):
# if self.optimizer is a (subclass) instance of tf.train.Optimizer
self.online_net.set_optimizer(self.optimizer, self.opt_clip_norm,
self.global_net.weights)
else:
raise ValueError('Optimizer {} invalid'.format(self.optimizer))
def get_output_dir(self, env_name):
"""Get an output directory for saving Keras models."""
if not os.path.isdir(self.save_dir):
os.makedirs(self.save_dir, exist_ok=True)
self.print('Made output dir', self.save_dir)
save_dir = self.save_dir
experiment_id = 0
for folder_name in os.listdir(save_dir):
if not os.path.isdir(os.path.join(save_dir, folder_name)):
continue
try:
folder_name = int(folder_name.split('-run')[-1])
if folder_name > experiment_id:
experiment_id = folder_name
except ValueError:
pass
experiment_id += 1
save_dir = os.path.join(save_dir, env_name)
save_dir += '-run{}'.format(experiment_id)
os.makedirs(save_dir, exist_ok=True)
return save_dir
def save_model(self, step):
"""Save a Keras model."""
if self.output is not None:
filename = os.path.join(self.output, 'model_{}.h5'.format(step))
self.online_net.save_model(filename)
self.print('keras model written to {}'.format(filename))
# Methods subject to overloading
def setup_algorithm(self):
"""Setup properties needed by the algorithm."""
raise NotImplementedError
def setup_nets(self, worker_dev, rep_dev, env):
"""Setup all neural networks."""
# global net
with tf.device(rep_dev):
self.global_net = self.build_net(env, is_global=True)
if self.is_master and self.verbose:
self.global_net.model.summary()
self.step_counter = StepCounter()
if self.num_parallel > 1:
# local net
with tf.device(worker_dev):
self.online_net = self.build_net(env)
self.online_net.set_loss(**self.loss_kwargs)
self.set_online_optimizer()
self.online_net.set_sync_weights(self.global_net.weights)
self.step_counter.set_increment()
else:
self.online_net = self.global_net
self.online_net.set_loss(**self.loss_kwargs)
self.set_online_optimizer()
self.step_counter.set_increment()
def build_model(self, state, feature, **kwargs):
"""Return a Keras model."""
raise NotImplementedError
def set_session(self, sess):
"""Set TensorFlow session for networks and step counter."""
for obj in self.global_net, self.online_net, self.step_counter:
obj.set_session(sess)
if self.load_model is not None:
self.global_net.set_sync_weights(self.saved_weights)
self.global_net.sync()
def sync_to_global(self):
"""Synchronize the online network to the global network."""
if self.num_parallel > 1:
self.online_net.sync()
if self.noisynet is not None:
self.online_net.sample_noise()
def train_on_batch(self, batch, batch_weight=None):
"""Train on a batch of rollout lists."""
b_r_state = []
b_r_slice = []
last_index = 0
b_rollout = []
for rlist in batch:
for rollout in rlist:
b_rollout.append(rollout)
r_state = []
for state in rollout.state_list:
r_state.append(self.state_to_input(state))
r_state = np.array(r_state)
b_r_state.append(r_state)
index = last_index + len(r_state)
b_r_slice.append(slice(last_index, index))
last_index = index
cc_state = np.concatenate(b_r_state)
if batch_weight is None:
cc_weight = None
else:
cc_weight = [weight for rlist, weight in zip(batch, batch_weight)
for rollout in rlist for _ in range(len(rollout))]
# cc_boots is a tuple of concatenated bootstrap quantities
cc_boots = self.concat_bootstrap(cc_state, b_r_slice)
        # b_r_boots is a list of tuples of bootstrap quantities,
        # where each tuple corresponds to a rollout
b_r_boots = [tuple(boot[r_slice] for boot in cc_boots)
for r_slice in b_r_slice]
# feed_list contains all arguments to train_on_batch
feed_list = []
for rollout, r_state, r_boot in zip(b_rollout, b_r_state, b_r_boots):
r_input = r_state[:-1]
r_feeds = self.rollout_feed(rollout, *r_boot)
feed_list.append((r_input, *r_feeds))
# concatenate individual types of feeds from the list
cc_args = *(np.concatenate(fd) for fd in zip(*feed_list)), cc_weight
batch_result = self.online_net.train_on_batch(*cc_args)
return batch_result
def concat_bootstrap(self, cc_state, b_r_slice):
"""Return bootstrapped quantities for a concatenated batch."""
raise NotImplementedError
def rollout_feed(self, rollout, *rollout_bootstraps):
"""Return feeds for a rollout."""
raise NotImplementedError
def rollout_target(self, rollout, value_last):
"""Return target value for a rollout."""
reward_long = 0.0 if rollout.done else value_last
r_target = np.zeros(len(rollout))
for idx in reversed(range(len(rollout))):
reward_long *= self.discount
reward_long += rollout.reward_list[idx]
r_target[idx] = reward_long
return r_target
def port_available(host, port):
    """Check availability of the given port on host."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((host, port)) != 0
    finally:
        sock.close()
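# --- Illustrative sketch (added): the discounted n-step target computed by
# Trainer.rollout_target above, on a standalone toy rollout. The reward list,
# bootstrap value and discount below are hypothetical, not drlbox defaults. ---
def _example_nstep_target(reward_list=(1.0, 0.0, 2.0), value_last=0.5,
                          done=False, discount=0.99):
    """Return targets where target[i] accumulates discounted rewards plus the bootstrap."""
    reward_long = 0.0 if done else value_last
    r_target = [0.0] * len(reward_list)
    for idx in reversed(range(len(reward_list))):
        reward_long = reward_list[idx] + discount * reward_long
        r_target[idx] = reward_long
    return r_target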
|
watcher.py
|
import logging
import os.path
import threading
import time
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
can_watch = True
except ImportError:
Observer = None
FileSystemEventHandler = object
PollingObserver = None
can_watch = False
from galaxy.util.hash_util import md5_hash_file
try:
from galaxy.web.stack import register_postfork_function
except ImportError:
# noop, replacement for non-web environments
def register_postfork_function(f):
pass
log = logging.getLogger(__name__)
def get_observer_class(config_value, default, monitor_what_str):
"""
"""
config_value = config_value or default
config_value = str(config_value).lower()
if config_value in ("true", "yes", "on", "auto"):
expect_observer = True
observer_class = Observer
elif config_value == "polling":
expect_observer = True
observer_class = PollingObserver
elif config_value in ('false', 'no', 'off'):
expect_observer = False
observer_class = None
else:
message = "Unrecognized value for watch_tools config option: %s" % config_value
raise Exception(message)
if expect_observer and observer_class is None:
message = "Watchdog library unavailable, cannot monitor %s." % monitor_what_str
if config_value == "auto":
log.info(message)
else:
raise Exception(message)
return observer_class
def get_tool_conf_watcher(reload_callback, tool_cache=None):
return ToolConfWatcher(reload_callback=reload_callback, tool_cache=tool_cache)
def get_tool_data_dir_watcher(tool_data_tables, config):
config_value = getattr(config, "watch_tool_data_dir", None)
observer_class = get_observer_class(config_value, default="False", monitor_what_str="tool-data directory")
if observer_class is not None:
return ToolDataWatcher(observer_class, tool_data_tables=tool_data_tables)
else:
return NullWatcher()
def get_tool_watcher(toolbox, config):
config_value = getattr(config, "watch_tools", None)
observer_class = get_observer_class(config_value, default="False", monitor_what_str="tools")
if observer_class is not None:
return ToolWatcher(toolbox, observer_class=observer_class)
else:
return NullWatcher()
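# Illustrative usage (added; not part of Galaxy): the factory helpers above return a real
# watcher or a NullWatcher depending on config. The attribute names mirror those read above,
# and `toolbox` is assumed to expose reload_tool_by_id():
#     config = SimpleNamespace(watch_tools="auto", watch_tool_data_dir="polling")
#     tool_watcher = get_tool_watcher(toolbox, config)
#     tool_watcher.watch_file("/path/to/tool.xml", tool_id="my_tool")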
class ToolConfWatcher(object):
def __init__(self, reload_callback, tool_cache=None):
self.paths = {}
self.cache = tool_cache
self._active = False
self._lock = threading.Lock()
self.thread = threading.Thread(target=self.check, name="ToolConfWatcher.thread")
self.thread.daemon = True
self.reload_callback = reload_callback
def start(self):
if not self._active:
self._active = True
register_postfork_function(self.thread.start)
def shutdown(self):
if self._active:
self._active = False
self.thread.join()
def check(self):
"""Check for changes in self.paths or self.cache and call the event handler."""
hashes = {key: None for key in self.paths.keys()}
while self._active:
do_reload = False
with self._lock:
paths = list(self.paths.keys())
for path in paths:
try:
if not os.path.exists(path):
continue
mod_time = self.paths[path]
                    if not hashes.get(path, None):
                        # hash the file once and reuse the digest instead of hashing it twice
                        initial_hash = md5_hash_file(path)
                        if initial_hash:
                            hashes[path] = initial_hash
else:
continue
new_mod_time = os.path.getmtime(path)
if new_mod_time > mod_time:
new_hash = md5_hash_file(path)
if hashes[path] != new_hash:
self.paths[path] = new_mod_time
hashes[path] = new_hash
                                log.debug("The file '%s' has changed.", path)
do_reload = True
except IOError:
# in rare cases `path` may be deleted between `os.path.exists` calls
# and reading the file from the filesystem. We do not want the watcher
# thread to die in these cases.
                        try:
                            del hashes[path]
                            del self.paths[path]
                        except KeyError:
                            pass
if self.cache:
self.cache.cleanup()
do_reload = True
if not do_reload and self.cache:
removed_ids = self.cache.cleanup()
if removed_ids:
do_reload = True
if do_reload:
self.reload_callback()
time.sleep(1)
def monitor(self, path):
mod_time = None
if os.path.exists(path):
mod_time = os.path.getmtime(path)
with self._lock:
self.paths[path] = mod_time
if not self._active:
self.start()
def watch_file(self, tool_conf_file):
self.monitor(tool_conf_file)
if not self._active:
self.start()
class NullToolConfWatcher(object):
def start(self):
pass
def shutdown(self):
pass
def monitor(self, conf_path):
pass
def watch_file(self, tool_file, tool_id):
pass
class ToolWatcher(object):
def __init__(self, toolbox, observer_class):
self.toolbox = toolbox
self.tool_file_ids = {}
self.tool_dir_callbacks = {}
self.monitored_dirs = {}
self.observer = observer_class()
self.event_handler = ToolFileEventHandler(self)
self.start()
def start(self):
register_postfork_function(self.observer.start)
def shutdown(self):
self.observer.stop()
self.observer.join()
def monitor(self, dir):
self.observer.schedule(self.event_handler, dir, recursive=False)
def watch_file(self, tool_file, tool_id):
tool_file = os.path.abspath(tool_file)
self.tool_file_ids[tool_file] = tool_id
tool_dir = os.path.dirname(tool_file)
if tool_dir not in self.monitored_dirs:
self.monitored_dirs[tool_dir] = tool_dir
self.monitor(tool_dir)
def watch_directory(self, tool_dir, callback):
tool_dir = os.path.abspath(tool_dir)
self.tool_dir_callbacks[tool_dir] = callback
if tool_dir not in self.monitored_dirs:
self.monitored_dirs[tool_dir] = tool_dir
self.monitor(tool_dir)
class ToolDataWatcher(object):
def __init__(self, observer_class, tool_data_tables):
self.tool_data_tables = tool_data_tables
self.monitored_dirs = {}
self.path_hash = {}
self.observer = observer_class()
self.event_handler = LocFileEventHandler(self)
self.start()
def start(self):
register_postfork_function(self.observer.start)
def shutdown(self):
self.observer.stop()
self.observer.join()
def monitor(self, dir):
self.observer.schedule(self.event_handler, dir, recursive=True)
def watch_directory(self, tool_data_dir):
tool_data_dir = os.path.abspath(tool_data_dir)
if tool_data_dir not in self.monitored_dirs:
self.monitored_dirs[tool_data_dir] = tool_data_dir
self.monitor(tool_data_dir)
class LocFileEventHandler(FileSystemEventHandler):
def __init__(self, loc_watcher):
self.loc_watcher = loc_watcher
def on_any_event(self, event):
self._handle(event)
def _handle(self, event):
# modified events will only have src path, move events will
# have dest_path and src_path but we only care about dest. So
# look at dest if it exists else use src.
path = getattr(event, 'dest_path', None) or event.src_path
path = os.path.abspath(path)
if path.endswith(".loc"):
cur_hash = md5_hash_file(path)
if cur_hash:
if self.loc_watcher.path_hash.get(path) == cur_hash:
return
else:
time.sleep(0.5)
if cur_hash != md5_hash_file(path):
# We're still modifying the file, it'll be picked up later
return
self.loc_watcher.path_hash[path] = cur_hash
self.loc_watcher.tool_data_tables.reload_tables(path=path)
class ToolFileEventHandler(FileSystemEventHandler):
def __init__(self, tool_watcher):
self.tool_watcher = tool_watcher
def on_any_event(self, event):
self._handle(event)
def _handle(self, event):
# modified events will only have src path, move events will
# have dest_path and src_path but we only care about dest. So
# look at dest if it exists else use src.
path = getattr(event, 'dest_path', None) or event.src_path
path = os.path.abspath(path)
tool_id = self.tool_watcher.tool_file_ids.get(path, None)
if tool_id:
try:
self.tool_watcher.toolbox.reload_tool_by_id(tool_id)
except Exception:
pass
elif path.endswith(".xml"):
directory = os.path.dirname(path)
dir_callback = self.tool_watcher.tool_dir_callbacks.get(directory, None)
if dir_callback:
tool_file = event.src_path
tool_id = dir_callback(tool_file)
if tool_id:
self.tool_watcher.tool_file_ids[tool_file] = tool_id
class NullWatcher(object):
def start(self):
pass
def shutdown(self):
pass
def watch_file(self, tool_file, tool_id):
pass
def watch_directory(self, tool_dir, callback=None):
pass
|
py_cli.py
|
"""
This module privodes core algrithm to pick up proxy ip resources.
"""
import time
import threading
from utils import get_redis_conn
from config.settings import (
DATA_ALL, LOWEST_TOTAL_PROXIES)
from .core import IPFetcherMixin
__all__ = ['ProxyFetcher']
lock = threading.RLock()
class Strategy:
strategy = None
def check(self, strategy):
return self.strategy == strategy
def get_proxies_by_stragery(self, pool):
"""
:param pool: pool is a list, which is mutable
:return:
"""
raise NotImplementedError
class RobinStrategy(Strategy):
def __init__(self):
super().__init__()
self.strategy = 'robin'
def get_proxies_by_stragery(self, pool):
if not pool:
return None
proxy = pool.pop(0)
pool.append(proxy)
return proxy
class GreedyStrategy(Strategy):
def __init__(self):
self.strategy = 'greedy'
def get_proxies_by_stragery(self, pool):
if not pool:
return None
return pool[0]
class ProxyFetcher(IPFetcherMixin):
def __init__(self, usage, strategy='robin', fast_response=5, redis_args=None):
"""
:param usage: one of SCORE_MAPS's keys, such as https
you must refresh pool
:param strategy: the load balance of proxy ip, the value is
one of ['robin', 'greedy']
:param fast_response: if you use greedy strategy, if will be needed to
decide whether a proxy ip should continue to be used
:param redis_args: redis connetion args, it's a dict, the keys
include host, port, db and password
"""
# if there are multi parent classes, super is only used for the first parent according to MRO
super().__init__(usage)
self.strategy = strategy
# pool is a queue, which is FIFO
self.pool = list()
self.fast_response = fast_response
self.handlers = [RobinStrategy(), GreedyStrategy()]
if isinstance(redis_args, dict):
self.conn = get_redis_conn(**redis_args)
else:
self.conn = get_redis_conn()
t = threading.Thread(target=self._refresh_periodically)
        t.daemon = True
t.start()
def get_proxy(self):
"""
        get one available proxy from redis; if none is available, None is returned
:return:
"""
proxy = None
self.refresh()
for handler in self.handlers:
if handler.strategy == self.strategy:
proxy = handler.get_proxies_by_stragery(self.pool)
return proxy
def get_proxies(self):
        # the older proxies will not be dropped
proxies = self.get_available_proxies(self.conn)
# client_logger.info('{} proxies have been fetched'.format(len(proxies)))
print('{} proxies have been fetched'.format(len(proxies)))
self.pool.extend(proxies)
return self.pool
def proxy_feedback(self, res, proxy, response_time=None):
"""
client should give feedbacks after executing get_proxy()
:param res: value of 'success' or 'failure'
:param proxy: proxy ip
:param response_time: the response time using current proxy ip
"""
        if res == 'failure':
            with lock:
                # guard against an empty pool before peeking at its ends
                if self.pool and proxy == self.pool[0]:
                    self.pool.pop(0)
                elif self.pool and proxy == self.pool[-1]:
                    self.pool.pop()
                self.delete_proxy(proxy)
return
# if the proxy response time is too long, add it to the tail of the list
        if (self.strategy == 'greedy' and response_time is not None
                and self.pool and self.fast_response * 1000 < response_time):
            self.pool.pop(0)
            self.pool.append(proxy)
def refresh(self):
if len(self.pool) < LOWEST_TOTAL_PROXIES:
self.get_proxies()
def delete_proxy(self, proxy):
pipe = self.conn.pipeline(True)
pipe.srem(DATA_ALL, proxy)
pipe.zrem(self.score_queue, proxy)
pipe.zrem(self.speed_queue, proxy)
pipe.zrem(self.ttl_queue, proxy)
pipe.execute()
def _refresh_periodically(self):
"""refresh self.pool periodically.Check 10 times in a second"""
while True:
if len(self.pool) < int(2*LOWEST_TOTAL_PROXIES):
self.get_proxies()
time.sleep(0.2)
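# --- Illustrative usage sketch (added; not part of the original module). The usage key
# 'https' and the redis_args values below are assumptions about your deployment. ---
if __name__ == '__main__':
    fetcher = ProxyFetcher('https', strategy='greedy', fast_response=5,
                           redis_args={'host': '127.0.0.1', 'port': 6379, 'db': 0})
    proxy = fetcher.get_proxy()
    if proxy:
        # ...issue a request through `proxy` here, measuring response_time in milliseconds...
        fetcher.proxy_feedback('success', proxy, response_time=800)
    else:
        print('no proxy available yet')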
|
cc_dcr.py
|
# coding=utf-8
"""The main file of the dcr-cc that is executed"""
from threading import Thread
import cmd_parser
import eventlog_parser
from conf_data import ConformanceAnalysisData, TraceConformanceAnalysisData
from graph import DCRGraph
from marking import Marking
def perform_conformance_checking(trace, ca):
"""
The perform conformance checking method gets a trace as an input and then simulates the model with
the constraints retrieved from the DCR graph.
:param ca: The conformance analysis data object that is used for the overall conformance checking
:param trace: the trace that is checked within this thread
:return:
"""
marking = Marking.get_initial_marking()
trace_conformance_data = TraceConformanceAnalysisData(trace)
for event in trace.Events:
node = dcr_graph.get_node_by_name(event.EventName)
marking.perform_transition_node(node, event, trace_conformance_data)
if len(marking.PendingResponse) != 0:
for pending in marking.PendingResponse:
if pending in marking.Included:
trace_conformance_data.add_violating_pending(pending.ActivityName)
if trace_conformance_data.HasViolations:
ca.append_conformance_data(trace_conformance_data)
def main():
"""
Program main method starts by parsing the DCR graph afterwards retrieving the Event Log
subsequently the conformance is checked
:return:
"""
global dcr_graph
# input
dcr_graph = DCRGraph.get_graph_instance(xml_path)
event_log = eventlog_parser.get_event_log(data_path, use_celonis)
ca = ConformanceAnalysisData()
# throughput
    # if parallel is set: one thread per trace is created
if parallel:
threads = []
for trace in event_log.Traces:
t = Thread(target=perform_conformance_checking, args=(trace, ca))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
# sequential conformance checking (Debugging purposes)
else:
for trace in event_log.Traces:
perform_conformance_checking(trace, ca)
# output
create_conformance_output(ca, event_log)
def create_conformance_output(ca, event_log):
"""
Creates the console output of the program
:param ca:
:param event_log:
:return:
"""
if len(ca.ViolatingTraces) > 0:
# Calculate ratios and replay fitness, Round up to two digits
violating_case_ratio = len(ca.ViolatingTraces) / len(event_log.Traces)
replay_fitness = 1 - violating_case_ratio
replay_fitness = "%.2f" % replay_fitness
violating_case_ratio *= 100
violating_case_ratio = "%.2f" % violating_case_ratio
conformance_ratio = 100 - float(violating_case_ratio)
# Output
        print('All in all, {} of {} traces violated the process model'.format(len(ca.ViolatingTraces), len(event_log.Traces)))
print('The ratio of violating cases is: {}%'.format(violating_case_ratio))
print("Thus, the conformance ratio is: {}%".format(conformance_ratio))
print("The replay fitness is: {}%".format(replay_fitness))
# Sort the dictionaries for the descending order of occurrences
sorted_including_violation = sorted(ca.ViolatedActivities.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_roles = sorted(ca.ViolatedRoles.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_pending = sorted(ca.ViolatedPending.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_connections = sorted(ca.ViolatedConnections.items(), key=lambda kv: kv[1], reverse=True)
sorted_violated_cases = sorted(ca.create_violated_traces_dict().items(), key=lambda kv: kv[1], reverse=True)
# Print all detailed information
print("\n{} process paths failed the events\n".format(len(sorted_violated_cases)))
for process_path in sorted_violated_cases:
print("The process path:\n\"{}\" \t was non-conformant {} times ".format(process_path[0], process_path[1]))
for included_violation in sorted_including_violation:
print('The activity \"{}\" has been executed {} times even though it was not included'.format(
included_violation[0], included_violation[1]))
for violated_role in sorted_violated_roles:
print('The role \"{}\" was misused \"{}\" times'.format(violated_role[0], violated_role[1]))
for violated_pending in sorted_violated_pending:
print('The activity {} was pending at the end in {} cases'.format(violated_pending[0], violated_pending[1]))
for violated_connection in sorted_violated_connections:
print('The {} was violated in {} traces'.format(violated_connection[0], violated_connection[1]))
else:
print('The conformance ratio is 100%')
def add_dcr_graph_for_test(test_graph):
"""
For unit tests with the main class a dcr graph can be added
:param test_graph: the created test graph
:return:
"""
global dcr_graph
dcr_graph = test_graph
if __name__ == '__main__':
# input parameters
args = cmd_parser.parse_args()
data_path = args.eventLog
xml_path = args.XmlDcr
use_celonis = False
if args.useCelonis is not None:
use_celonis = True
parallel = True
dcr_graph = None
columns_work = None
main()
|
collections.py
|
import collections
import threading
import time
from multiprocessing import Process
candle = collections.deque("candle")
def burn(direction, nextsource):
    while True:
        try:
            item = nextsource()
            time.sleep(0.1)
        except IndexError:
            break
        else:
            print("%s:%s\n" % (direction, item))
    print("Done %s \n" % direction)
# Create two workers that consume the double-ended queue from both ends.
# Note: with multiprocessing.Process each child gets its own copy of the deque
# (it is not shared between processes), unlike the threading variant commented out below.
# left = threading.Thread(target = burn , args = ("left",candle.popleft))
# right = threading.Thread(target = burn , args = ("right",candle.pop))
left = Process(target = burn , args = ("left",candle.popleft))
right = Process(target = burn , args = ("right",candle.pop))
if __name__ == "__main__":
left.start()
right.start()
left.join()
right.join()
|
reconnect_test.py
|
import time
from threading import Thread
from hazelcast.errors import HazelcastError, TargetDisconnectedError
from hazelcast.lifecycle import LifecycleState
from hazelcast.util import AtomicInteger
from tests.base import HazelcastTestCase
from tests.util import event_collector
class ReconnectTest(HazelcastTestCase):
rc = None
def setUp(self):
self.rc = self.create_rc()
self.cluster = self.create_cluster(self.rc)
def tearDown(self):
self.shutdown_all_clients()
self.rc.exit()
def test_start_client_with_no_member(self):
with self.assertRaises(HazelcastError):
self.create_client(
{
"cluster_members": [
"127.0.0.1:5701",
"127.0.0.1:5702",
"127.0.0.1:5703",
],
"cluster_connect_timeout": 2,
}
)
def test_start_client_before_member(self):
def run():
time.sleep(1.0)
self.cluster.start_member()
t = Thread(target=run)
t.start()
self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
t.join()
def test_restart_member(self):
member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
state = [None]
def listener(s):
state[0] = s
client.lifecycle_service.add_listener(listener)
member.shutdown()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LifecycleState.DISCONNECTED))
self.cluster.start_member()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LifecycleState.CONNECTED))
def test_listener_re_register(self):
member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
map = client.get_map("map").blocking()
collector = event_collector()
reg_id = map.add_entry_listener(added_func=collector)
self.logger.info("Registered listener with id %s", reg_id)
member.shutdown()
self.cluster.start_member()
count = AtomicInteger()
def assert_events():
if client.lifecycle_service.is_running():
while True:
try:
map.put("key-%d" % count.get_and_increment(), "value")
break
except TargetDisconnectedError:
pass
self.assertGreater(len(collector.events), 0)
else:
self.fail("Client disconnected...")
self.assertTrueEventually(assert_events)
def test_member_list_after_reconnect(self):
old_member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_connect_timeout": 5.0,
}
)
old_member.shutdown()
new_member = self.cluster.start_member()
def assert_member_list():
members = client.cluster_service.get_members()
self.assertEqual(1, len(members))
self.assertEqual(new_member.uuid, str(members[0].uuid))
self.assertTrueEventually(assert_member_list)
def test_reconnect_toNewNode_ViaLastMemberList(self):
old_member = self.cluster.start_member()
client = self.create_client(
{
"cluster_name": self.cluster.id,
"cluster_members": [
"127.0.0.1:5701",
],
"smart_routing": False,
"cluster_connect_timeout": 10.0,
}
)
new_member = self.cluster.start_member()
old_member.shutdown()
def assert_member_list():
members = client.cluster_service.get_members()
self.assertEqual(1, len(members))
self.assertEqual(new_member.uuid, str(members[0].uuid))
self.assertTrueEventually(assert_member_list)
|
alarm_perf.py
|
import datetime
import re
import sys
import time
import multiprocessing
from monascaclient import client
from monascaclient import ksclient
from agent_sim import agent_sim_process
import warnings
# suppress warnings to improve performance
def no_warnings(message, category, filename, lineno, file=None, line=None):
    pass
warnings.showwarning = no_warnings
num_processes = 10
num_requests = 4
num_metrics = 100
num_definitions = 2
max_wait_time = 20 # Seconds
# specify if the test should remove the generated alarms
cleanup_after_test = False
keystone = {
'username': 'mini-mon',
'password': 'password',
'project': 'test',
#'auth_url': 'http://10.22.156.20:35358/v3',
'auth_url': 'http://192.168.10.5:35357/v3'
}
# monasca api urls
urls = [
#'https://mon-ae1test-monasca01.useast.hpcloud.net:8080/v2.0',
#'https://mon-ae1test-monasca02.useast.hpcloud.net:8080/v2.0',
#'https://mon-ae1test-monasca03.useast.hpcloud.net:8080/v2.0',
'http://192.168.10.4:8080/v2.0',
]
metric_name = 'alarm_perf'
metric_dimension = 'dim1'
alarm_def_name = 'alarm_perf_test'
alarm_def_expression = '{} > 0'
if len(sys.argv) >= 2:
num_processes = int(sys.argv[1])
total_metrics = num_processes*num_requests*num_metrics
pattern = re.compile(alarm_def_name+'[0-9]+')
class MetricCreatorAlarmPerf():
""" Generates metrics
"""
def __init__(self, proc_num):
self.proc_num = proc_num
self.num_calls = 0
def create_metric(self):
host_num = self.num_calls + self.proc_num * num_requests * num_metrics
metric = {"name": metric_name + str(self.proc_num % num_definitions),
"dimensions": {metric_dimension: "value-" + str(host_num)},
"timestamp": time.time()*1000 + self.num_calls, # make sure each timestamp is unique,
# else influx 9 will overwrite previous metric
"value": 0}
self.num_calls += 1
return metric
def cleanup(monasca_client, name):
matched = 0
for definition in monasca_client.alarm_definitions.list():
if pattern.match(definition['name']):
print(definition['name'])
monasca_client.alarm_definitions.delete(alarm_id=definition['id'])
matched += 1
print("Removed {} definitions".format(matched))
def create_alarm_definition(monasca_client, name, expression):
try:
resp = monasca_client.alarm_definitions.create(
name=name,
expression=expression,
match_by=[metric_dimension]
)
print('Alarm Definition ID: {}'.format(resp['id']))
return resp['id']
except Exception as ex:
print('Could not create alarm definition\n{}'.format(ex))
return None
def aggregate_sent_metric_count(sent_q):
total_sent = 0
while not sent_q.empty():
item = sent_q.get()
if isinstance(item,int):
total_sent += item
else:
print(item)
return total_sent
def alarm_performance_test():
if num_processes < num_definitions:
return False, "Number of agents ({0}) must be >= number of definitions ({1})".format(num_processes,
num_definitions)
try:
print('Authenticating with keystone on {}'.format(keystone['auth_url']))
ks_client = ksclient.KSClient(**keystone)
except Exception as ex:
return False, 'Failed to authenticate: {}'.format(ex)
mon_client = client.Client('2_0', urls[0], token=ks_client.token)
print('Removing old alarm definitions for {}'.format(alarm_def_name))
cleanup(mon_client, alarm_def_name)
alarm_def_id_list = []
print('Creating alarm definitions')
for i in xrange(num_definitions):
expression = alarm_def_expression.format(metric_name+str(i))
alarm_def_id = create_alarm_definition(mon_client, alarm_def_name+str(i), expression)
if not alarm_def_id:
return False, "Failed to create alarm definition"
alarm_def_id_list.append(alarm_def_id)
sent_q = multiprocessing.Queue()
process_list = []
for i in xrange(num_processes):
p = multiprocessing.Process(target=agent_sim_process(i, num_requests, num_metrics, urls[(i % len(urls))],
keystone, queue=sent_q,
metric_creator=MetricCreatorAlarmPerf).run)
process_list.append(p)
start_datetime = datetime.datetime.now()
start_datetime = start_datetime - datetime.timedelta(microseconds=start_datetime.microsecond)
print("Starting test at: " + start_datetime.isoformat())
start_time = time.time()
for p in process_list:
p.start()
try:
for p in process_list:
try:
p.join()
except Exception:
pass
except KeyboardInterrupt:
return False, "User interrupt"
final_time = time.time()
# There is some chance that not all metrics were sent (lost connection, bad status, etc.)
total_metrics_sent = aggregate_sent_metric_count(sent_q)
print('Sent {} metrics in {} seconds'.format(total_metrics_sent,final_time-start_time))
if total_metrics_sent <= 0:
return False, "Failed to send metrics"
print('Waiting for alarms to be created')
alarm_count = 0
last_count = 0
last_change = time.time()
while alarm_count < total_metrics_sent:
alarm_count = 0
for id in alarm_def_id_list:
num = len(mon_client.alarms.list(alarm_definition_id=id))
alarm_count += num
if alarm_count > last_count:
last_change = time.time()
last_count = alarm_count
if (last_change + max_wait_time) <= time.time():
metrics_found = 0
for i in xrange(num_definitions):
val = len(mon_client.metrics.list_measurements(start_time=start_datetime.isoformat(), name=metric_name+str(i),
merge_metrics=True)[0]['measurements'])
metrics_found += val
return False, "Max wait time exceeded, {0} / {1} alarms found".format(alarm_count, metrics_found)
time.sleep(1)
delta = last_change - start_time
tot_met = 0
for i in xrange(num_definitions):
metrics = mon_client.metrics.list_measurements(start_time=start_datetime.isoformat(), name=metric_name+str(i),
merge_metrics=True)
tot_met += len(metrics[0]['measurements'])
print("Metrics from api: {}".format(tot_met))
print("-----Test Results-----")
print("{} alarms in {} seconds".format(alarm_count, delta))
print("{} per second".format(alarm_count/delta))
if cleanup_after_test:
cleanup(mon_client, alarm_def_name)
return True, ""
def main():
success, msg = alarm_performance_test()
if not success:
print("-----Test failed to complete-----")
print(msg)
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
views.py
|
import datetime
import logging
import re
import threading
from typing import Optional, List
import pytz
import simplejson as json
from django.contrib.auth.decorators import login_required
from laboratory.decorators import group_required
from django.core.exceptions import ValidationError
from django.db import transaction, connections
from django.db.models import Prefetch, Q
from django.forms import model_to_dict
from django.http import JsonResponse
from api import sql_func
from appconf.manager import SettingManager
from clients.models import (
CardBase,
Individual,
Card,
Document,
DocumentType,
District,
AnamnesisHistory,
DispensaryReg,
CardDocUsage,
BenefitReg,
BenefitType,
VaccineReg,
Phones,
AmbulatoryData,
AmbulatoryDataHistory,
DispensaryRegPlans,
ScreeningRegPlan,
)
from contracts.models import Company
from directions.models import Issledovaniya
from directory.models import Researches
from laboratory import settings
from laboratory.utils import strdate, start_end_year, localtime
from rmis_integration.client import Client
from slog.models import Log
from statistics_tickets.models import VisitPurpose
from tfoms.integration import match_enp, match_patient
from directory.models import DispensaryPlan
from utils.data_verification import data_parse
logger = logging.getLogger(__name__)
def full_patient_search_data(p, query):
dp = re.compile(r'^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$')
split = str(re.sub(' +', ' ', str(query))).split()
n = p = ""
f = split[0]
rmis_req = {"surname": f + "%"}
if len(split) > 1:
n = split[1]
rmis_req["name"] = n + "%"
if len(split) > 2:
if re.search(dp, split[2]):
split = [split[0], split[1], '', split[2]]
else:
p = split[2]
rmis_req["patrName"] = p + "%"
if len(split) > 3:
if '.' in split[3]:
btday = split[3].split(".")
elif len(split[3]) == 8 and split[3].isdigit():
btday = [split[3][0:2], split[3][2:4], split[3][4:8]]
else:
btday = None
if btday:
btday = btday[2] + "-" + btday[1] + "-" + btday[0]
rmis_req["birthDate"] = btday
return f, n, p, rmis_req, split
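# Illustrative note (added): for a query like "Иванов Иван Иванович 01.02.1990" the helper above
# returns f="Иванов", n="Иван", p="Иванович", the split query parts, and an rmis_req dict with
# "%"-suffixed name fields plus birthDate in "YYYY-MM-DD" form ("1990-02-01" here).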
@login_required
def patients_search_card(request):
objects = []
data = []
d = json.loads(request.body)
inc_rmis = d.get('inc_rmis')
always_phone_search = d.get('always_phone_search')
tfoms_module = SettingManager.l2('tfoms')
birthday_order = SettingManager.l2('birthday_order')
inc_tfoms = d.get('inc_tfoms') and tfoms_module
card_type = CardBase.objects.get(pk=d['type'])
query = d.get('query', '').strip()
suggests = d.get('suggests', False)
extended_search = d.get('extendedSearch', False)
limit = min(int(d.get('limit', 10)), 20)
form = d.get('form', {})
p = re.compile(r'^[а-яё]{3}[0-9]{8}$', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.?[0-9]{2}\.?[0-9]{4}))?)?)?$')
p_tfoms = re.compile(r'^([А-яЁё\-]+) ([А-яЁё\-]+)( ([А-яЁё\-]+))? (([0-9]{2})\.?([0-9]{2})\.?([0-9]{4}))$')
p3 = re.compile(r'^[0-9]{1,15}$')
p_enp_re = re.compile(r'^[0-9]{16}$')
p_enp = bool(re.search(p_enp_re, query))
p4 = re.compile(r'card_pk:\d+(:(true|false))?', flags=re.IGNORECASE)
p4i = bool(re.search(p4, query.lower()))
p5 = re.compile(r'phone:.+')
p5i = bool(re.search(p5, query))
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
c = None
has_phone_search = False
inc_archive = form and form.get('archive', False)
if extended_search and form:
q = {}
family = str(form.get('family', ''))
if family:
q['family__istartswith'] = family
name = str(form.get('name', ''))
if name:
q['name__istartswith'] = name
patronymic = str(form.get('patronymic', ''))
if patronymic:
q['patronymic__istartswith'] = patronymic
birthday = str(form.get('birthday', ''))
if birthday:
birthday_parts = birthday.split('.')
if len(birthday_parts) == 3:
if birthday_parts[0].isdigit():
q['birthday__day'] = int(birthday_parts[0])
if birthday_parts[1].isdigit():
q['birthday__month'] = int(birthday_parts[1])
if birthday_parts[2].isdigit():
q['birthday__year'] = int(birthday_parts[2])
objects = Individual.objects.all()
if q:
objects = objects.filter(**q)
enp_s = str(form.get('enp_s', ''))
enp_n = str(form.get('enp_n', ''))
if enp_n:
if enp_s:
                objects = objects.filter(document__serial=enp_s, document__number=enp_n, document__document_type__title='Полис ОМС')
else:
objects = objects.filter(document__number=enp_n, document__document_type__title='Полис ОМС')
pass_s = str(form.get('pass_s', ''))
pass_n = str(form.get('pass_n', ''))
if pass_n:
objects = objects.filter(document__serial=pass_s, document__number=pass_n, document__document_type__title='Паспорт гражданина РФ')
snils = str(form.get('snils', ''))
        if snils:
objects = objects.filter(document__number=snils, document__document_type__title='СНИЛС')
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
objects = objects.filter(card__medbook_number=medbook_number)
phone = str(form.get('phone', ''))
if phone:
normalized_phones = Phones.normalize_to_search(phone)
if normalized_phones:
objects = objects.filter(
Q(card__phones__normalized_number__in=normalized_phones)
| Q(card__phones__number__in=normalized_phones)
| Q(card__phone__in=normalized_phones)
| Q(card__doctorcall__phone__in=normalized_phones)
)
elif p5i or (always_phone_search and len(query) == 11 and query.isdigit()):
has_phone_search = True
phone = query.replace('phone:', '')
normalized_phones = Phones.normalize_to_search(phone)
objects = list(
Individual.objects.filter(
Q(card__phones__normalized_number__in=normalized_phones)
| Q(card__phones__number__in=normalized_phones)
| Q(card__phone__in=normalized_phones)
| Q(card__doctorcall__phone__in=normalized_phones)
)
)
elif p_enp:
if tfoms_module and not suggests:
from_tfoms = match_enp(query)
if from_tfoms and isinstance(from_tfoms, dict):
Individual.import_from_tfoms(from_tfoms)
objects = list(Individual.objects.filter(document__number=query, document__document_type__title='Полис ОМС'))
elif not p4i:
if inc_tfoms:
t_parts = re.search(p_tfoms, query.lower()).groups()
t_bd = "{}-{}-{}".format(t_parts[7], t_parts[6], t_parts[5])
from_tfoms = match_patient(t_parts[0], t_parts[1], t_parts[2], t_bd)
if isinstance(from_tfoms, list):
for t_row in from_tfoms:
if isinstance(t_row, dict):
Individual.import_from_tfoms(t_row, no_update=True)
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = list(
Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday, card__base=card_type)
)
if ((card_type.is_rmis and len(objects) == 0) or (card_type.internal_type and inc_rmis)) and not suggests:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base({"surname": query[0] + "%", "name": query[1] + "%", "patrName": query[2] + "%", "birthDate": btday}, fio=True)
except Exception as e:
logger.exception(e)
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
if len(split) > 3 or (len(split) == 3 and split[-1].isdigit()):
sbd = split[-1]
if len(sbd) == 8:
sbd = "{}.{}.{}".format(sbd[0:2], sbd[2:4], sbd[4:8])
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, card__base=card_type, birthday=datetime.datetime.strptime(sbd, "%d.%m.%Y").date())
if len(split) > 3:
                    objects = objects.filter(patronymic__istartswith=p)
objects = objects[:10]
else:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, card__base=card_type)[:10]
if ((card_type.is_rmis and (len(objects) == 0 or (len(split) < 4 and len(objects) < 10))) or (card_type.internal_type and inc_rmis)) and not suggests:
objects = list(objects)
try:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(rmis_req, fio=True, limit=10 - len(objects))
except Exception as e:
logger.exception(e)
if (
(re.search(p3, query) and not card_type.is_rmis)
or (len(objects) == 0 and len(query) == 16 and not p_enp and card_type.internal_type)
or (card_type.is_rmis and not re.search(p3, query))
):
resync = True
if len(objects) == 0:
resync = False
try:
objects = Individual.objects.filter(card__number=query.upper(), card__base=card_type)
if not inc_archive:
objects = objects.filter(card__is_archive=False)
objects = list(objects)
if (card_type.is_rmis or card_type.internal_type) and len(objects) == 0 and len(query) == 16 and not suggests:
if not c:
c = Client(modules="patients")
objects += c.patients.import_individual_to_base(query)
elif not suggests:
resync = True
except Exception as e:
logger.exception(e)
if resync and card_type.is_rmis and not suggests:
if not c:
c = Client(modules="patients")
sema = threading.BoundedSemaphore(10)
threads = list()
def sync_i(ind_local: Individual, client: Client):
sema.acquire()
try:
ind_local.sync_with_rmis(c=client)
finally:
sema.release()
try:
connections.close_all()
logger.exception("Closed db connections")
except Exception as e:
logger.exception(f"Error closing connections {e}")
for obj in objects:
thread = threading.Thread(target=sync_i, args=(obj, c))
threads.append(thread)
thread.start()
if p4i:
parts = query.split(":")
cards = Card.objects.filter(pk=int(parts[1]))
inc_archive = inc_archive or (len(parts) > 2 and parts[2] == 'true')
else:
cards = Card.objects.filter(base=card_type, individual__in=objects)
if not has_phone_search and re.match(p3, query):
cards = cards.filter(number=query)
if p_enp and cards:
cards = cards.filter(carddocusage__document__number=query, carddocusage__document__document_type__title='Полис ОМС')
if cards:
medbook_number = str(form.get('medbookNumber', ''))
if medbook_number and SettingManager.l2('profcenter'):
cards = cards.filter(medbook_number=medbook_number)
d1, d2 = start_end_year()
if birthday_order:
cards = cards.order_by('-individual__birthday')
if not inc_archive:
cards = cards.filter(is_archive=False)
row: Card
for row in (
cards.select_related("individual", "base")
.prefetch_related(
Prefetch(
'individual__document_set',
queryset=Document.objects.filter(is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.select_related('document_type')
.order_by('pk'),
),
'phones_set',
)
.distinct()[:limit]
):
disp_data = sql_func.dispensarization_research(row.individual.sex, row.individual.age_for_year(), row.pk, d1, d2)
status_disp = 'finished'
if not disp_data:
status_disp = 'notneed'
else:
for i in disp_data:
if not i[4]:
status_disp = 'need'
break
data.append(
{
"type_title": card_type.title,
"base_pk": row.base_id,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"fio_age": row.individual.fio(full=True),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"isArchive": row.is_archive,
"pk": row.pk,
"phones": Phones.phones_to_normalized_list(row.phones_set.all(), row.phone),
"main_diagnosis": row.main_diagnosis,
"docs": [
*[
{
"pk": x.pk,
"type_title": x.document_type.title,
"document_type_id": x.document_type_id,
"serial": x.serial,
"number": x.number,
"is_active": x.is_active,
"date_start": x.date_start,
"date_end": x.date_end,
"who_give": x.who_give,
"from_rmis": x.from_rmis,
"rmis_uid": x.rmis_uid,
}
for x in row.individual.document_set.all()
],
*(
[
{
"pk": -10,
"type_title": "Номер мед.книжки",
"document_type_id": -10,
"serial": row.medbook_prefix,
"number": str(row.medbook_number),
"is_active": True,
"date_start": None,
"date_end": None,
"who_give": "",
"from_rmis": False,
"rmis_uid": None,
}
]
if row.medbook_number
else []
),
],
"medbookNumber": f"{row.medbook_prefix} {row.medbook_number}".strip(),
"status_disp": status_disp,
"disp_data": disp_data,
}
)
return JsonResponse({"results": data})
@login_required
def patients_search_individual(request):
objects = []
data = []
d = json.loads(request.body)
query = d['query'].strip()
p = re.compile(r'[а-яё]{3}[0-9]{8}', re.IGNORECASE)
p2 = re.compile(r'^([А-яЁё\-]+)( ([А-яЁё\-]+)(( ([А-яЁё\-]*))?( ([0-9]{2}\.[0-9]{2}\.[0-9]{4}))?)?)?$')
p4 = re.compile(r'individual_pk:\d+')
pat_bd = re.compile(r"\d{4}-\d{2}-\d{2}")
if re.search(p, query.lower()):
initials = query[0:3].upper()
btday = query[7:11] + "-" + query[5:7] + "-" + query[3:5]
if not pat_bd.match(btday):
return JsonResponse([], safe=False)
try:
objects = Individual.objects.filter(family__startswith=initials[0], name__startswith=initials[1], patronymic__startswith=initials[2], birthday=btday)
except ValidationError:
objects = []
elif re.search(p2, query):
f, n, p, rmis_req, split = full_patient_search_data(p, query)
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p)
if len(split) > 3:
objects = Individual.objects.filter(family__istartswith=f, name__istartswith=n, patronymic__istartswith=p, birthday=datetime.datetime.strptime(split[3], "%d.%m.%Y").date())
if re.search(p4, query):
objects = Individual.objects.filter(pk=int(query.split(":")[1]))
n = 0
if not isinstance(objects, list):
for row in objects.distinct().order_by("family", "name", "patronymic", "birthday"):
n += 1
data.append({"family": row.family, "name": row.name, "patronymic": row.patronymic, "birthday": row.bd(), "age": row.age_s(), "sex": row.sex, "pk": row.pk})
if n == 25:
break
return JsonResponse({"results": data})
def patients_search_l2_card(request):
data = []
request_data = json.loads(request.body)
cards = Card.objects.filter(pk=request_data.get('card_pk', -1))
if cards.exists():
card_orig = cards[0]
Card.add_l2_card(card_orig=card_orig)
l2_cards = Card.objects.filter(individual=card_orig.individual, base__internal_type=True)
for row in l2_cards.filter(is_archive=False):
docs = (
Document.objects.filter(individual__pk=row.individual_id, is_active=True, document_type__title__in=['СНИЛС', 'Паспорт гражданина РФ', 'Полис ОМС'])
.distinct("pk", "number", "document_type", "serial")
.order_by('pk')
)
data.append(
{
"type_title": row.base.title,
"num": row.number,
"is_rmis": row.base.is_rmis,
"family": row.individual.family,
"name": row.individual.name,
"twoname": row.individual.patronymic,
"birthday": row.individual.bd(),
"age": row.individual.age_s(),
"sex": row.individual.sex,
"individual_pk": row.individual_id,
"base_pk": row.base_id,
"pk": row.pk,
"phones": row.get_phones(),
"docs": [{**model_to_dict(x), "type_title": x.document_type.title} for x in docs],
"main_diagnosis": row.main_diagnosis,
}
)
return JsonResponse({"results": data})
@login_required
def patients_get_card_data(request, card_id):
card = Card.objects.get(pk=card_id)
c = model_to_dict(card)
i = model_to_dict(card.individual)
docs = [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=card.individual).distinct('pk', "number", "document_type", "serial").order_by('pk')
]
rc = Card.objects.filter(base__is_rmis=True, individual=card.individual)
d = District.objects.all().order_by('-sort_weight', '-id')
return JsonResponse(
{
**i,
**c,
"docs": docs,
"main_docs": card.get_card_documents(),
"main_address_full": card.main_address_full,
"fact_address_full": card.fact_address_full,
"has_rmis_card": rc.exists(),
"av_companies": [{"id": -1, "title": "НЕ ВЫБРАНО", "short_title": ""}, *[model_to_dict(x) for x in Company.objects.filter(active_status=True).order_by('title')]],
"custom_workplace": card.work_place != "",
"work_place_db": card.work_place_db_id or -1,
"district": card.district_id or -1,
"districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=False)]],
"ginekolog_district": card.ginekolog_district_id or -1,
"gin_districts": [{"id": -1, "title": "НЕ ВЫБРАН"}, *[{"id": x.pk, "title": x.title} for x in d.filter(is_ginekolog=True)]],
"agent_types": [{"key": x[0], "title": x[1]} for x in Card.AGENT_CHOICES if x[0]],
"excluded_types": Card.AGENT_CANT_SELECT,
"agent_need_doc": Card.AGENT_NEED_DOC,
"mother": None if not card.mother else card.mother.get_fio_w_card(),
"mother_pk": card.mother_id,
"father": None if not card.father else card.father.get_fio_w_card(),
"father_pk": card.father_id,
"curator": None if not card.curator else card.curator.get_fio_w_card(),
"curator_pk": card.curator_id,
"agent": None if not card.agent else card.agent.get_fio_w_card(),
"agent_pk": card.agent_id,
"payer": None if not card.payer else card.payer.get_fio_w_card(),
"payer_pk": card.payer_id,
"rmis_uid": rc[0].number if rc.exists() else None,
"doc_types": [{"pk": x.pk, "title": x.title} for x in DocumentType.objects.all()],
"number_poli": card.number_poliklinika,
"harmful": card.harmful_factor,
"medbookPrefix": card.medbook_prefix,
"medbookNumber": card.medbook_number,
"medbookNumberCustom": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookNumberCustomOriginal": card.medbook_number if card.medbook_type == 'custom' else '',
"medbookType": card.medbook_type,
"medbookTypePrev": card.medbook_type,
"isArchive": card.is_archive,
}
)
@login_required
@group_required("Картотека L2", "Лечащий врач", "Врач-лаборант", "Оператор лечащего врача", "Оператор Контакт-центра")
def patients_card_save(request):
request_data = json.loads(request.body)
message = ""
messages = []
if "new_individual" in request_data and (request_data["new_individual"] or not Individual.objects.filter(pk=request_data["individual_pk"])) and request_data["card_pk"] < 0:
i = Individual(family=request_data["family"], name=request_data["name"], patronymic=request_data["patronymic"], birthday=request_data["birthday"], sex=request_data["sex"])
i.save()
else:
changed = False
i = Individual.objects.get(pk=request_data["individual_pk"] if request_data["card_pk"] < 0 else Card.objects.get(pk=request_data["card_pk"]).individual_id)
if (
i.family != request_data["family"]
or i.name != request_data["name"]
or i.patronymic != request_data["patronymic"]
or str(i.birthday) != request_data["birthday"]
or i.sex != request_data["sex"]
):
changed = True
i.family = request_data["family"]
i.name = request_data["name"]
i.patronymic = request_data["patronymic"]
i.birthday = datetime.datetime.strptime(request_data["birthday"], "%d.%m.%Y" if '.' in request_data["birthday"] else "%Y-%m-%d").date()
i.sex = request_data["sex"]
i.save()
if Card.objects.filter(individual=i, base__is_rmis=True).exists() and changed:
try:
c = Client(modules=["individuals", "patients"])
c.patients.send_patient(Card.objects.filter(individual=i, base__is_rmis=True)[0])
except:
messages.append("Синхронизация с РМИС не удалась")
individual_pk = i.pk
if request_data["card_pk"] < 0:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
c = Card(number=Card.next_l2_n(), base=base, individual=i, main_diagnosis="", main_address="", fact_address="")
c.save()
card_pk = c.pk
Log.log(card_pk, 30000, request.user.doctorprofile, request_data)
else:
card_pk = request_data["card_pk"]
c = Card.objects.get(pk=card_pk)
individual_pk = request_data["individual_pk"]
c.main_diagnosis = request_data["main_diagnosis"]
try:
vals = json.loads(request_data["main_address_full"])
c.main_address = vals['address']
c.main_address_fias = vals['fias']
c.main_address_details = vals['details']
except:
c.main_address = request_data["main_address"]
c.main_address_fias = None
c.main_address_details = None
try:
vals = json.loads(request_data["fact_address_full"])
c.fact_address = vals['address']
c.fact_address_fias = vals['fias']
c.fact_address_details = vals['details']
except:
c.fact_address = request_data["fact_address"]
c.fact_address_fias = None
c.fact_address_details = None
c.number_poliklinika = request_data.get("number_poli", "")
if request_data["custom_workplace"] or not Company.objects.filter(pk=request_data.get("work_place_db", -1)).exists():
c.work_place_db = None
c.work_place = request_data["work_place"] if request_data["custom_workplace"] else ''
else:
c.work_place_db = Company.objects.get(pk=request_data["work_place_db"])
c.work_place = ''
c.district_id = request_data["district"] if request_data["district"] != -1 else None
c.ginekolog_district_id = request_data["gin_district"] if request_data["gin_district"] != -1 else None
c.work_position = request_data["work_position"]
c.phone = request_data["phone"]
c.harmful_factor = request_data.get("harmful", "")
medbook_type = request_data.get("medbookType", "")
medbook_prefix = str(request_data.get("medbookPrefix", "")).strip()
medbook_number = str(request_data.get("medbookNumber", "-1"))
medbook_number_custom = str(request_data.get("medbookNumberCustom", "-1"))
medbook_number = medbook_number if medbook_type != 'custom' else medbook_number_custom
medbook_number_int = int(medbook_number) if medbook_number.isdigit() else None
if medbook_type == 'none' and c.medbook_type != 'none':
c.medbook_number = ''
c.medbook_type = medbook_type
else:
try:
with transaction.atomic():
base = CardBase.objects.select_for_update().get(pk=request_data["base_pk"], internal_type=True)
if medbook_type == 'custom' and medbook_number_int is not None and (c.medbook_number != medbook_number_int or c.medbook_prefix != medbook_prefix):
medbook_auto_start = SettingManager.get_medbook_auto_start()
if medbook_number_int <= 1 or medbook_auto_start <= medbook_number_int:
raise Exception("Некорректный номер мед.книжки")
if Card.objects.filter(medbook_number=medbook_number, base=base, medbook_prefix=medbook_prefix).exclude(pk=c.pk).exists():
raise Exception(f"Номер {medbook_prefix} {medbook_number} уже есть у другого пациента")
c.medbook_prefix = medbook_prefix
c.medbook_number = medbook_number_int
c.medbook_type = medbook_type
elif (c.medbook_type != 'auto' or c.medbook_number == '') and medbook_type == 'auto':
c.medbook_prefix = ''
c.medbook_number = Card.next_medbook_n()
c.medbook_type = medbook_type
except Exception as e:
messages.append(str(e))
c.save()
if c.individual.primary_for_rmis:
try:
c.individual.sync_with_rmis()
except:
messages.append("Синхронизация с РМИС не удалась")
result = "ok"
return JsonResponse({"result": result, "message": message, "messages": messages, "card_pk": card_pk, "individual_pk": individual_pk})
@login_required
@group_required("Управление иерархией истории")
def patients_card_archive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
card.is_archive = True
card.save()
return JsonResponse({"ok": True})
@login_required
@group_required("Управление иерархией истории")
def patients_card_unarchive(request):
request_data = json.loads(request.body)
pk = request_data['pk']
card = Card.objects.get(pk=pk)
if card.is_archive:
n = card.number
if Card.objects.filter(number=n, is_archive=False, base=card.base).exists():
return JsonResponse({"ok": False, "message": "fНомер {n} уже занят другой картой"})
card.is_archive = False
card.save()
return JsonResponse({"ok": True})
def individual_search(request):
result = []
request_data = json.loads(request.body)
tfoms_module = SettingManager.l2('tfoms')
family = request_data["family"]
name = request_data["name"]
patronymic = request_data["patronymic"]
birthday = request_data["birthday"]
forced_gender = []
if tfoms_module and family and name and birthday:
from_tfoms = match_patient(family, name, patronymic, birthday)
for row in from_tfoms:
Individual.import_from_tfoms(row, no_update=True)
forced_gender.append(row['gender'].lower())
for i in Individual.objects.filter(family=family, name=name, patronymic=patronymic, birthday=birthday):
result.append(
{
"pk": i.pk,
"fio": i.fio(full=True),
"docs": [
{**model_to_dict(x), "type_title": x.document_type.title}
for x in Document.objects.filter(individual=i, is_active=True).distinct("number", "document_type", "serial", "date_end", "date_start")
],
"l2_cards": [{"number": x.number, "pk": x.pk} for x in Card.objects.filter(individual=i, base__internal_type=True, is_archive=False)],
}
)
forced_gender.append(i.sex)
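    # Collapse forced_gender to a single value only when every matched record agrees on the same sex; otherwise leave it unset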
forced_gender = None if not forced_gender or forced_gender.count(forced_gender[0]) != len(forced_gender) else forced_gender[0]
return JsonResponse({"result": result, 'forced_gender': forced_gender})
def get_sex_by_param(request):
request_data = json.loads(request.body)
t = request_data.get("t")
v = request_data.get("v", "")
r = "м"
if t == "name":
p = Individual.objects.filter(name=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "family":
p = Individual.objects.filter(family=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
if t == "patronymic":
p = Individual.objects.filter(patronymic=v)
r = "м" if p.filter(sex__iexact="м").count() >= p.filter(sex__iexact="ж").count() else "ж"
return JsonResponse({"sex": r})
def edit_doc(request):
request_data = json.loads(request.body)
pk = request_data["pk"]
serial = request_data["serial"].strip()
number = request_data["number"].strip()
type_o = DocumentType.objects.get(pk=request_data["type"])
is_active = request_data["is_active"]
date_start = request_data["date_start"]
date_start = None if date_start == "" else date_start
date_end = request_data["date_end"]
date_end = None if date_end == "" else date_end
who_give = (request_data["who_give"] or "").strip()
if pk == -1:
card = Card.objects.get(pk=request_data["card_pk"])
d = Document(
document_type=type_o,
number=number,
serial=serial,
from_rmis=False,
date_start=date_start,
date_end=date_end,
who_give=who_give,
is_active=is_active,
individual=Individual.objects.get(pk=request_data["individual_pk"]),
)
d.save()
cdu = CardDocUsage.objects.filter(card=card, document__document_type=type_o)
if not cdu.exists():
CardDocUsage(card=card, document=d).save()
else:
for c in cdu:
c.document = d
c.save(update_fields=["document"])
Log.log(d.pk, 30002, request.user.doctorprofile, request_data)
else:
for d in Document.objects.filter(pk=pk, from_rmis=False):
d.number = number
d.serial = serial
d.is_active = is_active
d.date_start = date_start
d.date_end = date_end
d.who_give = who_give
d.save()
Log.log(pk, 30002, request.user.doctorprofile, request_data)
d = Document.objects.get(pk=pk)
try:
d.sync_rmis()
except Exception as e:
print('RMIS error', e) # noqa: T001
return JsonResponse({"ok": True})
def update_cdu(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
doc = Document.objects.get(pk=request_data["doc"])
cdu = CardDocUsage.objects.filter(card=card, document__document_type=doc.document_type)
if not cdu.exists():
CardDocUsage(card=card, document=doc).save()
else:
for c in cdu:
c.document = doc
c.save(update_fields=["document"])
Log.log(card.pk, 30004, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def sync_rmis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
card.individual.sync_with_rmis()
return JsonResponse({"ok": True})
def sync_tfoms(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
is_new, updated = card.individual.sync_with_tfoms()
return JsonResponse({"ok": True, "is_new": is_new, "updated": updated})
def update_wia(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
key = request_data["key"]
if key in [x[0] for x in Card.AGENT_CHOICES]:
card.who_is_agent = key
card.save()
Log.log(card.pk, 30006, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def edit_agent(request):
request_data = json.loads(request.body)
key = request_data["key"]
card = None if not request_data["card_pk"] else Card.objects.get(pk=request_data["card_pk"])
parent_card = Card.objects.filter(pk=request_data["parent_card_pk"])
doc = request_data["doc"] or ''
clear = request_data["clear"]
need_doc = key in Card.AGENT_NEED_DOC
upd = {}
if clear or not card:
upd[key] = None
if need_doc:
upd[key + "_doc_auth"] = ''
if parent_card[0].who_is_agent == key:
upd["who_is_agent"] = ''
else:
upd[key] = card
if need_doc:
upd[key + "_doc_auth"] = doc
if key not in Card.AGENT_CANT_SELECT:
upd["who_is_agent"] = key
for card in parent_card:
for k, v in upd.items():
setattr(card, k, v)
card.save(update_fields=list(upd.keys()))
Log.log(request_data["parent_card_pk"], 30005, request.user.doctorprofile, request_data)
return JsonResponse({"ok": True})
def load_dreg(request):
request_data = json.loads(request.body)
data = []
diagnoses = set()
for a in DispensaryReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"diagnos": a.diagnos,
"illnes": a.illnes,
"spec_reg": '' if not a.spec_reg else a.spec_reg.title,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
"why_stop": a.why_stop,
}
)
if not a.date_end:
diagnoses.add(a.diagnos)
researches = []
specialities = []
researches_data = []
specialities_data = []
card = Card.objects.get(pk=request_data["card_pk"])
visits = VisitPurpose.objects.filter(title__icontains="диспансерн")
year = request_data.get('year', '2020')
for d in sorted(diagnoses):
need = DispensaryPlan.objects.filter(diagnos=d)
for i in need:
if i.research:
if i.research not in researches:
researches.append(i.research)
results = research_last_result_every_month([i.research], card, year)
plans = get_dispensary_reg_plans(card, i.research, None, year)
researches_data.append(
{
"type": "research",
"research_title": i.research.title,
"research_pk": i.research.pk,
"assign_research_pk": i.research.pk,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_res = researches.index(i.research)
researches_data[index_res]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
if i.speciality:
if i.speciality not in specialities:
specialities.append(i.speciality)
                    results = research_last_result_every_month(Researches.objects.filter(speciality=i.speciality), card, year, visits)
plans = get_dispensary_reg_plans(card, None, i.speciality, year)
spec_assign_research = Researches.objects.filter(speciality=i.speciality).first()
specialities_data.append(
{
"type": "speciality",
"research_title": i.speciality.title,
"research_pk": i.speciality.pk,
"assign_research_pk": spec_assign_research.pk if spec_assign_research else None,
"assignment": False,
"diagnoses_time": [],
"results": results,
"plans": plans,
"max_time": 1,
"times": len([x for x in results if x]),
}
)
index_spec = specialities.index(i.speciality)
specialities_data[index_spec]['diagnoses_time'].append({"diagnos": i.diagnos, "times": i.repeat})
researches_data.extend(specialities_data)
return JsonResponse({"rows": data, "researches_data": researches_data, "year": year})
def load_screening(request):
card_pk: int = data_parse(request.body, {'cardPk': int})[0]
screening = ScreeningRegPlan.get_screening_data(card_pk)
return JsonResponse({"data": screening})
def research_last_result_every_month(researches: List[Researches], card: Card, year: str, visits: Optional[List[VisitPurpose]] = None):
results = []
filter = {
"napravleniye__client": card,
"research__in": researches,
"time_confirmation__year": year,
}
if visits:
filter['purpose__in'] = visits
    for month in range(1, 13):
        iss: Optional[Issledovaniya] = Issledovaniya.objects.filter(**filter, time_confirmation__month=str(month)).order_by("-time_confirmation").first()
if iss:
date = str(localtime(iss.time_confirmation).day).rjust(2, '0')
results.append({"pk": iss.napravleniye_id, "date": date})
else:
results.append(None)
return results
def get_dispensary_reg_plans(card, research, speciality, year):
plan = [''] * 12
disp_plan = DispensaryRegPlans.objects.filter(card=card, research=research, speciality=speciality, date__year=year)
for d in disp_plan:
if d.date:
plan[d.date.month - 1] = str(d.date.day).rjust(2, '0')
return plan
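# Sketch of the expected return shape (inferred from the code above, hypothetical values):
# get_dispensary_reg_plans() always yields a 12-element list indexed by month, where each slot is
# either '' or a zero-padded day-of-month string, e.g.
#
#     plan = get_dispensary_reg_plans(card, research, None, '2020')
#     # -> ['', '', '14', '', '', '', '', '03', '', '', '', '']   (plan dates in March and August)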
def update_dispensary_reg_plans(request):
request_data = json.loads(request.body)
DispensaryRegPlans.update_plan(request_data["card_pk"], request_data["researches_data_def"], request_data["researches_data"], request_data["year"])
return JsonResponse({"ok": True})
def update_screening_reg_plan(request):
request_data = json.loads(request.body)
ScreeningRegPlan.update_plan(request_data)
return JsonResponse({"ok": True})
def load_vaccine(request):
request_data = json.loads(request.body)
data = []
for a in VaccineReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "title": a.title, "series": a.series, "amount": a.amount, "method": a.method, "step": a.step, "tap": a.tap})
return JsonResponse({"rows": data})
def load_ambulatory_data(request):
request_data = json.loads(request.body)
data = []
for a in AmbulatoryData.objects.filter(card__pk=request_data["card_pk"]).order_by('date', 'pk'):
data.append({"pk": a.pk, "date": strdate(a.date) if a.date else '', "data": a.data})
return JsonResponse({"rows": data})
def load_benefit(request):
request_data = json.loads(request.body)
data = []
for a in BenefitReg.objects.filter(card__pk=request_data["card_pk"]).order_by('date_start', 'pk'):
data.append(
{
"pk": a.pk,
"benefit": str(a.benefit),
"registration_basis": a.registration_basis,
"doc_start_reg": '' if not a.doc_start_reg else a.doc_start_reg.get_fio(),
"doc_start_reg_id": a.doc_start_reg_id,
"date_start": '' if not a.date_start else strdate(a.date_start),
"doc_end_reg": '' if not a.doc_end_reg else a.doc_end_reg.get_fio(),
"doc_end_reg_id": a.doc_end_reg_id,
"date_end": '' if not a.date_end else strdate(a.date_end),
}
)
return JsonResponse({"rows": data})
def load_dreg_detail(request):
a = DispensaryReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"diagnos": a.diagnos + ' ' + a.illnes,
"date_start": None if not a.date_start else a.date_start,
"date_end": None if not a.date_end else a.date_end,
"close": bool(a.date_end),
"why_stop": a.why_stop,
"time_index": a.what_times,
"identified_index": a.how_identified,
}
return JsonResponse(data)
def load_vaccine_detail(request):
a = VaccineReg.objects.get(pk=json.loads(request.body)["pk"])
data = {
"date": a.date,
"direction": a.direction,
"title": a.title,
"series": a.series,
"amount": a.amount,
"method": a.method,
"step": a.step,
"tap": a.tap,
"comment": a.comment,
}
return JsonResponse(data)
def load_ambulatory_data_detail(request):
a = AmbulatoryData.objects.get(pk=json.loads(request.body)["pk"])
str_adate = str(a.date)[0:7]
data = {
"date": str_adate,
"data": a.data,
}
return JsonResponse(data)
def load_ambulatory_history(request):
request_data = json.loads(request.body)
result = AmbulatoryDataHistory.objects.filter(card__pk=request_data["card_pk"]).order_by('-created_at')
rows = [{'date': strdate(i.created_at), 'data': i.text} for i in result]
return JsonResponse({"rows": rows})
def load_benefit_detail(request):
pk = json.loads(request.body)["card_pk"]
if pk > -1:
a = BenefitReg.objects.get(pk=pk)
data = {
"benefit_id": a.benefit_id,
"registration_basis": a.registration_basis,
"date_start": '' if not a.date_start else a.date_start,
"date_end": '' if not a.date_end else a.date_end,
"close": bool(a.date_end),
}
else:
data = {
"benefit_id": -1,
"registration_basis": "",
"date_start": '',
"date_end": '',
"close": False,
}
return JsonResponse(
{
"types": [{"pk": -1, "title": 'Не выбрано'}, *[{"pk": x.pk, "title": str(x)} for x in BenefitType.objects.filter(hide=False).order_by('pk')]],
**data,
}
)
@transaction.atomic
def save_dreg(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
create_disp_record = False
if pk == -1:
a = DispensaryReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
create_disp_record = True
else:
pk = rd["pk"]
a = DispensaryReg.objects.get(pk=pk)
Log.log(pk, 40000 if n else 40001, request.user.doctorprofile, rd)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
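    # fd() normalizes a "DD.MM.YYYY" string to "YYYY-MM-DD"; already ISO-formatted input passes through unchanged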
if (
not a.date_start
and d["date_start"]
or str(a.date_start) != fd(d["date_start"])
or a.spec_reg != request.user.doctorprofile.specialities
or a.doc_start_reg != request.user.doctorprofile
):
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
a.spec_reg = request.user.doctorprofile.specialities
c = True
if not a.date_end and d["close"] or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.why_stop = d["why_stop"]
a.doc_end_reg = request.user.doctorprofile
c = True
elif d["close"] and a.why_stop != d["why_stop"]:
a.why_stop = d["why_stop"]
c = True
if not d["close"] and (a.date_end or a.why_stop):
a.date_end = None
a.why_stop = ''
a.doc_end_reg = None
c = True
i = d["diagnos"].split(' ')
ds = i.pop(0)
if len(i) == 0:
i = ''
else:
i = ' '.join(i)
if a.diagnos != ds or a.illnes != i:
a.diagnos = ds
a.illnes = i
if create_disp_record:
disp_obj = DispensaryReg.objects.filter(card_id=rd["card_pk"], diagnos=ds, date_start=fd(d["date_start"]), doc_start_reg=request.user.doctorprofile)
if disp_obj.exists():
a.delete()
return JsonResponse({"ok": False, "pk": -1, "c": False})
c = True
if d.get('identified_index', 0) != a.how_identified:
a.how_identified = d.get('identified_index', 0)
c = True
if d.get('time_index', 0) != a.what_times:
a.what_times = d.get('time_index', 0)
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_vaccine(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
if pk == -1:
a = VaccineReg.objects.create(card_id=rd["card_pk"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = VaccineReg.objects.get(pk=pk)
Log.log(pk, 70000 if n else 70001, request.user.doctorprofile, rd)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(d["date"]):
a.date = fd(d["date"])
c = True
if a.direction != d["direction"]:
a.direction = d["direction"]
c = True
if a.title != d["title"]:
a.title = d["title"]
c = True
if a.series != d["series"]:
a.series = d["series"]
c = True
if a.amount != d["amount"]:
a.amount = d["amount"]
c = True
if a.step != d["step"]:
a.step = d["step"]
c = True
if a.tap != d["tap"]:
a.tap = d["tap"]
c = True
if a.comment != d["comment"]:
a.comment = d["comment"]
c = True
if a.method != d["method"]:
a.method = d["method"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_ambulatory_data(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
date_request = f"{d['date']}-01"
if pk == -1:
a = AmbulatoryData.objects.create(card_id=rd["card_pk"])
pk = a.pk
else:
pk = rd["pk"]
a = AmbulatoryData.objects.get(pk=pk)
c = False
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if str(a.date) != fd(date_request):
a.date = fd(date_request)
c = True
if a.data != d["data"]:
a.data = d["data"]
c = True
if not a.doc:
a.doc = request.user.doctorprofile
c = True
if c:
a.save()
AmbulatoryDataHistory.save_ambulatory_history(rd["card_pk"], request.user.doctorprofile)
return JsonResponse({"ok": True, "pk": pk, "c": c})
@transaction.atomic
def save_benefit(request):
rd = json.loads(request.body)
d = rd["data"]
pk = rd["pk"]
n = False
c = False
if pk == -1:
a = BenefitReg.objects.create(card_id=rd["card_pk"], benefit_id=d["benefit_id"])
pk = a.pk
n = True
else:
pk = rd["pk"]
a = BenefitReg.objects.get(pk=pk)
if a.benefit_id != d["benefit_id"]:
a.benefit_id = d["benefit_id"]
c = True
Log.log(pk, 50000 if n else 50001, request.user.doctorprofile, {**rd, "data": {**{k: v for k, v in rd["data"].items() if k not in ['types']}}})
def fd(s):
if '.' in s:
s = s.split('.')
s = '{}-{}-{}'.format(s[2], s[1], s[0])
return s
if not a.date_start and d["date_start"] or str(a.date_start) != fd(d["date_start"]) or a.doc_start_reg != request.user.doctorprofile:
a.date_start = fd(d["date_start"])
a.doc_start_reg = request.user.doctorprofile
c = True
if a.registration_basis != d["registration_basis"]:
a.registration_basis = d["registration_basis"]
c = True
if not a.date_end and d["close"] or (d["close"] and a.doc_end_reg != request.user.doctorprofile) or (d["close"] and str(a.date_end) != fd(d["date_end"])):
a.date_end = fd(d["date_end"])
a.doc_end_reg = request.user.doctorprofile
c = True
if not d["close"] and a.date_end:
a.date_end = None
a.doc_end_reg = None
c = True
if c:
a.save()
return JsonResponse({"ok": True, "pk": pk, "c": c})
def load_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
history = []
for a in AnamnesisHistory.objects.filter(card=card).order_by('-pk'):
history.append(
{
"pk": a.pk,
"text": a.text,
"who_save": {
"fio": a.who_save.get_fio(dots=True),
"department": a.who_save.podrazdeleniye.get_title(),
},
"datetime": a.created_at.astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%d.%m.%Y %X"),
}
)
data = {
"text": card.anamnesis_of_life,
"history": history,
}
return JsonResponse(data)
def save_anamnesis(request):
request_data = json.loads(request.body)
card = Card.objects.get(pk=request_data["card_pk"])
if card.anamnesis_of_life != request_data["text"]:
card.anamnesis_of_life = request_data["text"]
card.save()
AnamnesisHistory(card=card, text=request_data["text"], who_save=request.user.doctorprofile).save()
return JsonResponse({"ok": True})
def create_l2_individual_from_card(request):
request_data = json.loads(request.body)
polis = request_data['polis']
has_tfoms_data = False
if SettingManager.l2('tfoms'):
from_tfoms = match_enp(polis)
if from_tfoms:
has_tfoms_data = True
Individual.import_from_tfoms(from_tfoms, no_update=True)
if not has_tfoms_data:
Individual.import_from_tfoms(
{
"enp": polis,
"family": request_data['family'],
"given": request_data['name'],
"patronymic": request_data['patronymic'],
"gender": request_data['sex'],
"birthdate": request_data['bdate'],
},
no_update=True,
)
return JsonResponse({"ok": True})
def is_l2_card(request):
request_data = json.loads(request.body)
card = Card.objects.filter(number=request_data['number'], base__internal_type=True).first()
if card:
return JsonResponse({"ok": True, "individual_fio": card.individual.fio()})
else:
return JsonResponse({"ok": False})
|
utils.py
|
# -*- coding: utf-8 -*-
from __future__ import division
import os
import sys
import socket
import signal
import functools
import atexit
import tempfile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from time import sleep
try:
import simplejson as json
except ImportError:
import json
from .exceptions import CommandError, TimeoutWaitingFor
ON_POSIX = 'posix' in sys.builtin_module_names
# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "..", "src")
)
# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_certs")
)
# Default location of test extensions
DEFAULT_EXTENSION_PATH = os.path.abspath(
os.path.join(CURRENT_DIR, "..", "test_extensions")
)
# Environment flags to control skipping of anomaly tests
ANOMALY_SKIP = os.environ.get("ANOMALY_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
ANOMALY_USE_PATH = os.environ.get("ANOMALY_USE_PATH", False)
UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")
def anomaly_binary_location(cmd="anomaly"):
""" ../src/ is used by default.
"""
return os.path.join(BIN_PREFIX, cmd)
return binary_location(cmd, ANOMALY_USE_PATH)
def binary_location(cmd, USE_PATH=False):
    """ ../src/ is used by default; set USE_PATH to resolve the command via $PATH instead.
    """
    if USE_PATH:
        return cmd
    return os.path.join(BIN_PREFIX, cmd)
def wait_condition(cond, timeout=1, sleeptime=.01):
"""Wait for condition to return anything other than None
"""
# NOTE Increasing sleeptime can dramatically increase testsuite runtime
# It also reduces CPU load significantly
if timeout is None:
timeout = 1
if timeout < sleeptime:
print("Warning, timeout cannot be smaller than", sleeptime)
timeout = sleeptime
# Max number of attempts until giving up
tries = int(timeout / sleeptime)
for i in range(tries):
val = cond()
if val is not None:
break
sleep(sleeptime)
return val
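# Minimal usage sketch (illustrative only; the marker path is hypothetical): poll a callable
# until it returns something other than None or the timeout elapses.
#
#     marker = "/tmp/example.flag"
#     found = wait_condition(lambda: True if os.path.exists(marker) else None, timeout=2)
#     if found is None:
#         print("condition never became true within the timeout")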
def wait_process(pid, timeout=None):
"""Wait for process to finish
"""
def process():
try:
os.kill(pid, 0)
except OSError:
# Process is dead
return True
else:
# Process is still ticking
return None
return wait_condition(process, timeout)
def _queue_output(arguments, pidq, outputq):
"""Read/Write output/input of given process.
This function is meant to be executed in a thread as it may block
"""
kwargs = arguments["process"]
input = arguments["input"]
try:
proc = Popen(**kwargs)
except OSError as e:
# pid None is read by the main thread as a crash of the process
pidq.put(None)
outputq.put((
"",
("Unexpected exception caught during execution: '{0}' . ".format(e)),
255)) # false exitcode
return
# Put the PID in the queue for main process to know.
pidq.put(proc.pid)
# Send input and wait for finish
out, err = proc.communicate(input)
if sys.version_info > (3,):
out, err = out.decode('utf-8'), err.decode('utf-8')
# Give the output back to the caller
outputq.put((out, err, proc.returncode))
def _retrieve_output(thread, timeout, queue, thread_error):
"""Fetch output from binary subprocess queues
"""
# Try to join the thread on failure abort
thread.join(timeout)
    if thread.is_alive():
# Join should have killed the thread. This is unexpected
raise TimeoutWaitingFor(thread_error + ". Unexpected error")
# Thread died so we should have output
try:
# data = (stdout, stderr, exitcode)
data = queue.get(timeout=timeout)
except Empty:
data = TimeoutWaitingFor("streams from program")
return data
def _get_output(arguments, timeout=None):
"""Collect output from the subprocess without blocking the main process if
subprocess hangs.
"""
# NOTE Increase this value if tests fail with None being received as
# stdout/stderr instead of the expected content
output_timeout = 0.1 # seconds
pidq = Queue()
outputq = Queue()
t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
t.daemon = True
t.start()
try:
pid = pidq.get(timeout=timeout)
except Empty:
pid = None
# Process crashed or timed out for some reason
if pid is None:
return _retrieve_output(t, output_timeout, outputq,
"Program to start")
# Wait for process to finish (normal execution)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program thread to join")
# If we reach this point we assume the process got stuck or timed out
for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
# Start with lower signals and escalate if process ignores them
try:
            os.kill(pid, sig)
except OSError as e:
# 3 means the process finished/died between last check and now
if e.errno != 3:
raise
# Wait for process to finish (should die/exit after signal)
state = wait_process(pid, timeout)
if state:
# Process finished
return _retrieve_output(t, output_timeout, outputq,
"Program to die")
# This should never happen but in case something goes really bad
raise OSError("Program stopped responding and couldn't be killed")
def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
merge_streams=False, env=os.environ, timeout=None):
"Run a subprocess and wait for it to finish"
if input is None:
stdin = None
else:
stdin = PIPE
if merge_streams:
stderr = STDOUT
else:
stderr = PIPE
arguments = {
"process": {
"args": cmd,
"stdin": stdin,
"stdout": stdout,
"stderr": stderr,
"bufsize": 1,
"close_fds": ON_POSIX,
"env": env,
},
"input": input,
}
out, err, exit = _get_output(arguments, timeout)
if merge_streams:
if exit != 0:
raise CommandError(cmd, exit, out)
else:
return exit, out
else:
if exit != 0:
raise CommandError(cmd, exit, out, err)
else:
return exit, out, err
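# Hedged usage sketch (not part of the suite): run a short command and inspect its output.
# A non-zero exit status raises CommandError instead of returning.
#
#     code, out, err = run_cmd_wait(["echo", "hello"], timeout=5)
#     assert code == 0 and out.strip() == "hello"
#
# With merge_streams=True only (exit, out) is returned and stderr is folded into stdout.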
def run_cmd_wait_nofail(*args, **kwargs):
"Same as run_cmd_wait but silence the exception if it happens"
try:
return run_cmd_wait(*args, **kwargs)
except CommandError as e:
return e.code, e.out, e.err
def memoize(obj):
"""Keep an in-memory cache of function results given it's inputs
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
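# Illustrative example (assumed usage, kept commented so nothing runs at import time):
#
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)  # later calls with the same arguments are served from the in-memory cache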
try:
from shutil import which
which = memoize(which)
except ImportError:
# NOTE: This is shutil.which backported from python-3.3.3
@memoize
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
def parse_datafile(file):
"""Parse .data files, treating files as JSON
"""
data = []
with open(file) as fh:
for line in fh:
line = line.rstrip("\n")
# Turn [] strings into {} to be treated properly as JSON hashes
if line.startswith('[') and line.endswith(']'):
line = '{' + line[1:-1] + '}'
if line.startswith("{"):
data.append(json.loads(line))
else:
data.append(line)
return data
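# Sketch of the .data format handled above (hypothetical file content): '[...]' lines are
# rewritten to '{...}' and parsed as JSON, '{...}' lines are parsed directly, and anything
# else is kept verbatim as a string.
#
#     # example.data
#     #   [ "description": "Buy milk", "status": "pending" ]
#     #   {"description": "Pay rent", "status": "completed"}
#     #   plain text line
#     # parse_datafile("example.data") -> [dict, dict, 'plain text line']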
def mkstemp(data):
"""
Create a temporary file that is removed at process exit
"""
def rmtemp(name):
try:
os.remove(name)
except OSError:
pass
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data)
f.close()
# Ensure removal at end of python session
atexit.register(rmtemp, f.name)
return f.name
def mkstemp_exec(data):
"""Create a temporary executable file that is removed at process exit
"""
name = mkstemp(data)
os.chmod(name, 0o755)
return name
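# Illustrative usage (assumed, not executed): create a throwaway executable script that is
# removed automatically when the interpreter exits, then run it via run_cmd_wait().
#
#     hook = mkstemp_exec(b"#!/bin/sh\nexit 0\n")
#     exit_code, out, err = run_cmd_wait([hook])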
# vim: ai sts=4 et sw=4
|
__main__.py
|
"""
This is what happens when __main__ gets called. We dance:
- initialize an Administration object
- process params, sort of like you would with optparse
- analyze params, call methods of the Administration object
"""
# standard library imports
import argparse
from collections import OrderedDict
import itertools
import socket
import sys
import threading
import time
# third party imports
from bson.objectid import ObjectId
import gridfs
from pymongo import MongoClient
# local imports
from app import API
from app import models, utils
from app.admin import clone, patch, pickle_login, purge
#
# misc. helper methods for CLI admin tasks
#
# CLI colors
class Style:
""" A bogus class used to make it easier to inject color into print()
statements (of which we have more than a few in this module)."""
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
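# Illustrative usage (not executed): wrap text in a color code and always close with Style.END,
# e.g. print(Style.BOLD + "dangerous operation" + Style.END)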
# API stat
def stat():
""" Summarizes the state of the API. """
output_str = ["", "*", " "*7, "API:"]
api_str = []
for key in ['VERSION', 'MDB']:
api_str.append("%s: %s" % (key, API.config[key]))
output_str.append(", ".join(api_str))
print(" ".join(output_str) + "\n")
# fancy print
def warn(message=None):
""" Prints 'message', a str, to stdout with some formatting. """
print(Style.GREEN, " *", Style.END, Style.BOLD, message, Style.END)
# Database methods start here
#
@utils.metered
def initialize():
""" Completely initializes the application. Scorched earth. I'm not even
kidding: unless you've got a database backup, there's no coming back from
this one! YHBW
"""
logger = utils.get_logger()
MongoClient().drop_database(API.config['MDB'])
logger.critical("Initialized database!")
# Dumpers start here: these basically dump a dictionary to the CLI so that the
# administrator can read it
# dictionary dumper
def dump_doc_to_cli(m, tab_spaces=2, gap_spaces=20, buffer_lines=0):
""" Dumps a single MDB record to stdout using print() statements.
    Also works for dict objects. You know. Because they're the same thing.
"""
tab = " " * tab_spaces
buffer_spacer = "%s" % "\n" * buffer_lines
print(buffer_spacer)
    # respect OrderedDict key order
if isinstance(m, OrderedDict):
keys = m.keys()
else:
keys = sorted(m.keys())
for k in keys:
first_spacer = " " * (gap_spaces - len(k))
if gap_spaces >= 30:
second_spacer = " " * (gap_spaces - len(str(m[k])))
else:
second_spacer = " " * ((gap_spaces * 2) - len(str(m[k])))
if not isinstance(m[k], dict):
print("%s%s%s%s%s%s" % (
tab, k, first_spacer, m[k], second_spacer, type(m[k])
)
)
else:
print(
"%s%s%s%s%s%s" % (
tab, k, first_spacer, " " * gap_spaces, second_spacer, type(m[k])
)
)
def dump_dict_key(dictionary, key, recursion_level=1):
""" Lets us dump dictionary keys indefinitely and maintain
CLI legibility by doing indents. """
recursion_filler = " " * recursion_level
if isinstance(dictionary[key], dict):
print("%s%s%s" % (" " * gap_spaces, recursion_filler, key))
for r_key in dictionary[key].keys():
dump_dict_key(
dictionary[key],
r_key,
recursion_level + 1
)
else:
print(
"%s%s`- %s: %s" % (
" " * gap_spaces,
recursion_filler,
key,
dictionary[key],
)
)
for key in sorted(m[k].keys()):
dump_dict_key(m[k], key)
print(buffer_spacer)
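# Illustrative call (hypothetical document, not executed): dump_doc_to_cli({'name': 'Lantern Hoard',
# 'population': 7}) prints each key, its value and the value's type in aligned columns.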
# survivor dumper
def dump_survivor_to_cli(s_id, detailed=False):
""" Dump a simplified representation of a survivor to the CLI buffer. """
spacer = 30
s = utils.mdb.survivors.find_one({'_id': s_id})
if detailed:
dump_doc_to_cli(s)
else:
print(
" %s\t" % s['_id'],
"%s | %s" % (s['sex'], s['name']),
" " * (spacer - len(s['name'])),
"created: %s" % (s['created_on'].strftime(utils.ymd)),
end=''
)
if s.get('removed', False):
print(" / removed: %s" % (s['removed'].strftime(utils.ymd)), end='')
print()
# settlement dumper
def dump_settlement_to_cli(s_id, verbose=False):
""" Dump a simplified representation of a settlement dict 's' to the
command line. """
s = utils.mdb.settlements.find_one({'_id': s_id})
if verbose:
dump_doc_to_cli(s)
else:
s_repr = OrderedDict()
attrs = ['_id', 'name', 'campaign', 'expansions',
'created_by', 'created_on']
for attr in attrs:
try:
s_repr[attr] = s[attr]
except:
s_repr[attr] = None
dump_doc_to_cli(s_repr)
if s.get('removed', False):
print(' \x1b[1;31;40m Removed on %s \x1b[0m \n' % s['removed'])
survivors = utils.mdb.survivors.find({'settlement': s['_id']})
removed_survivors = utils.mdb.survivors.find(
{'settlement': s['_id'], 'removed': {'$exists': True}}
)
print(" %s survivors found. %s have been removed.\n" % (
survivors.count(),
removed_survivors.count()
))
for survivor in survivors:
dump_survivor_to_cli(survivor['_id'])
print()
def dump_settlement_timeline_to_cli(s_id, verbose=False):
""" Dumps the timeline for settlement with 's_id'. """
settlement = utils.mdb.settlements.find_one({'_id': s_id})
for ly in settlement['timeline']:
print(' ' + str(settlement['timeline'][ly['year']]))
# tl_dict = settlement['timeline']
# year_int = ly['year']
# print(' %s \ ' % (year_int))
# for event_type in tl_dict[year_int].keys():
# if event_type != 'year':
# print(' - %s: %s' % (event_type, tl_dict[year_int][event_type]))
#
# class methods below:
# - AdministrationObject
# - UserManagementObject
#
class AdministrationObject:
""" The AdministrationObject class is basically just a way of making
argument parsing and processing a little more OO and therefore easier to
remember/debug. """
def __init__(self):
"""
DO NOT INITIALIZE ONE OF THESE UNLESS __name__ == '__main__'
Once initialized, this object is used to perform an admin function and,
therefore, all of its methods return an exit status.
"""
parser = argparse.ArgumentParser(description=' KDM API Administration!')
# dev / r&d
# clone (users)
parser.add_argument('--dump_requests', dest='dump_requests',
default=None, metavar=5, type=int,
help="[DEV] Dumps the last N requests to the CLI.",
)
parser.add_argument('--dump_users', dest='dump_users',
default=False, metavar=5, type=int,
help="[DEV] Dumps the last N users to the CLI.",
)
clone_params = parser.add_mutually_exclusive_group()
clone_params.add_argument(
'--clone_user',
action="store_true",
dest='clone_user',
default=False,
help = (
"[CLONE] Clone one user from production to local (requires "
"--user to be an email address)."
),
)
clone_params.add_argument(
'--clone_recent_users',
action="store_true",
dest='clone_recent_users',
default=False,
help="[CLONE] Clone recent production users to local.",
)
# admin (users)
parser.add_argument('--user', dest='user', default=None,
help="Work with a user",
metavar="[email protected]")
parser.add_argument('--reset_password', dest='reset_pw', default=None,
help="[USER] Reset a user's password (manually) "
"(requires --user).",
action='store_true')
parser.add_argument("--admin", dest="user_admin", default=False,
action="store_true",
help=(
"[USER] "
"Toggle admin status (requires --user)."
),
)
parser.add_argument("--level", dest="user_subscriber_level", type=int,
help=(
"[USER] "
"Set subscriber level (requires --user)."
),
metavar=2
)
parser.add_argument("--settlements", dest="user_settlements",
default=False, action="store_true",
help=(
"[USER] "
"Dump user settlements (requires --user)."
),
)
parser.add_argument("--survivors", dest="user_survivors",
default=False, action="store_true",
help=(
"[USER] "
"Dump user survivors (requires --user)."
),
)
# work with settlements
parser.add_argument('--settlement', dest='settlement', default=None,
help=(
"[SETTLEMENT] "
"Work with a settlement"
),
metavar="5d13762e84d8863941ed4e20")
parser.add_argument('--dump_settlement', dest='dump_settlement',
default=None, help = (
"[SETTLEMENT] "
"Dump settlement details"
),
action="store_true"),
parser.add_argument('--dump_timeline', dest='dump_settlement_timeline',
default=None, help = (
"[SETTLEMENT] "
"Dump settlement's timeline."
),
action="store_true"),
parser.add_argument('--event_log', dest='settlement_event_log',
help=(
"[SETTLEMENT] "
"Dump the settlement event log "
"(requires --settlement)."
),
default=False,
action="store_true",
)
parser.add_argument('--remove_settlement', dest='settlement_remove',
help=(
"[SETTLEMENT] "
"Mark the settlement 'removed': this queues for"
" it for automatic deletion."
"(requires --settlement)."
),
default=False,
action="store_true",
)
parser.add_argument('--unremove_settlement', dest='settlement_unremove',
help=(
"[SETTLEMENT] "
"Remove the 'removed' flag and take the "
"settlement out of the delete queue."
"(requires --settlement)."
),
default=False,
action="store_true",
)
# work with survivors
parser.add_argument('--survivor', dest='survivor', default=None,
help=(
"[SURVIVOR] "
"Work with a survivor"
),
metavar="135d137e3d81953941ed4e20")
parser.add_argument('--dump_survivor', dest='dump_survivor',
default=None, help = (
"[SURVIVOR] "
"Dump survivor details"
),
action="store_true")
# sysadmin / console cowboy / hacker shit
parser.add_argument('-f', '--force', dest='force', default=False,
help=(
"[SYSADMIN] Add the 'force' option to "
"whatever it is that we're doing"
),
action="store_true",
)
parser.add_argument('--initialize', dest='initialize', default=False,
help=(
"[SYSADMIN] "
"Initialize the mdb database, '%s'"
) % (
API.config['MDB'],
), action="store_true",
)
parser.add_argument('--patch', dest='apply_patch',
metavar="patch_method", default=None,
help=(
"[SYSADMIN] "
"Apply a patch (from the patches.py module)."
),
)
parser.add_argument('--patch_args', dest='patch_args',
metavar="patch_args", default=None,
help=(
"[SYSADMIN] "
"Comma-delimited positional arguments to pass "
"to the patch method being called."
),
)
parser.add_argument("--purge_settlements", dest="purge_settlements",
help=(
"[SYSADMIN] "
"Drops settlements marked 'removed' from mdb. "
"Works recursively & drops 'removed' survivors."
" Max age is date removed + %s days."
) % (
utils.settings.get(
'users',
'removed_settlement_age_max'
)
),
action="store_true", default=False)
self.options = parser.parse_args()
def process_args(self):
""" Analyzes self.options and self.args and calls one of our class
methods. """
# first, check to see if we're patching / hot-fixing
if self.options.apply_patch is not None:
p_name = str(self.options.apply_patch)
try:
patch_method = getattr(patch, p_name)
except AttributeError:
print(" '%s' is not a known patch! Exiting...\n" % p_name)
sys.exit(1)
if self.options.patch_args:
args = self.options.patch_args.split(',')
patch_method(*args)
else:
patch_method()
print(' Patch applied successfully!\n')
# idiot-proofing
if (
self.options.user is not None and
self.options.settlement is not None
):
msg = "Cannot work with a user and a settlement at the same time!"
raise ValueError(msg)
# purge settlements/survivors marked 'removed' from mdb
if self.options.purge_settlements:
print(" Purging 'removed' settlements from MDB...")
purge_results = purge.purge_removed_settlements(arm=True)
print(' Settlements purged: %s'% purge_results['settlements'])
print(' Survivors purged: %s' % purge_results['survivors'])
print(' Done!\n')
# initialize MDB
if self.options.initialize:
# sanity/env check
print(" hostname: %s" % socket.gethostname())
if socket.gethostname() not in ["mona"]:
print(" Holy shit! This is not the dev machine! Exiting...\n")
sys.exit(1)
msg = (
' Initialize the project and',
'%sremove all data%s?' % (Style.YELLOW, Style.END),
'Type "YES" to proceed: ',
)
manual_approve = input(" ".join(msg))
if manual_approve == "YES":
initialize()
print(
Style.BOLD,
"\n Project initialized!",
Style.RED,
"ALL DATA REMOVED!!\n",
Style.END
)
print(' Exiting...\n')
sys.exit()
# dump request logs
if self.options.dump_requests is not None:
self.dump_request_logs(self.options.dump_requests)
# dump users
if self.options.dump_users:
self.dump_recent_users(self.options.dump_users)
# clone user (by OID) from legacy webapp
if self.options.clone_user:
self.clone_one_user(force=self.options.force)
# clone many users (via API route)
        if self.options.clone_recent_users:
self.clone_many_users(pickle_auth=True)
# work with user
if self.options.user is not None:
self.work_with_user()
# work with settlement
if self.options.settlement is not None:
self.work_with_settlement()
# work with survivor
if self.options.survivor is not None:
self.work_with_survivor()
return 0
#
# log browsing
#
def dump_request_logs(self, how_many=1):
""" Dumps logs of recent requests. """
logs = utils.mdb.api_response_times.find().sort(
'created_on',
-1
).limit(how_many)
for log in logs:
dump_doc_to_cli(log)
#
# user browsing
#
def dump_recent_users(self, how_many=1):
""" Dumps summary info on recent users. """
users = utils.mdb.users.find().sort(
'latest_activity',
-1
).limit(how_many)
longest_email = 0
for user in users:
if len(user['login']) > longest_email:
longest_email = len(user['login'])
users.rewind()
users.sort('latest_activity', -1)
# header
print(
" " * 10,
"OID", " " * 11,
int(longest_email / 2.2) * " ",
"login",
int(longest_email / 1.7) * " ",
"latest_activity"
)
# dump
for user in users:
stub = "[%s] %s%s%s"
spacer = (longest_email + 2) - len(user['login'])
print(
stub % (
user['_id'],
user['login'],
" " * spacer,
user['latest_activity']
)
)
print()
#
# methods for cloning users from production
#
def clone_one_user(self, user_email=None, force=False):
""" Clones one user. Prompts to reset password. """
prod_api_url = API.config['PRODUCTION']['url']
print("\n KDM-API: %s\n Initiating request..." % prod_api_url)
# do it!
new_oid = clone.get_one_user_from_api(
prod_api_url,
pickle_auth = True,
u_login = self.options.user
)
# initialize the user as an object
user_object = models.users.User(_id=new_oid)
print(' %s%s%s has been cloned!' % (
Style.YELLOW,
user_object.user['login'],
Style.END,
)
)
# password reset business logic
approval = None
# use case 1: the admin wants to force it
if force:
approval = 'Y'
# use case 2: no force flag; ask for approval via CLI
if not force:
while approval is None:
try:
msg = " Reset password? [YES]: "
approval = input(msg)
except EOFError:
pass
# default to yes, e.g. if we just hit enter
if approval is not None and len(approval) == 0:
approval = 'Y'
# proceed only with approval
if approval[0].upper() == 'Y':
user_object.update_password('password')
print(' Password has been reset!\n')
else:
print(" Password has NOT been reset.\n")
@utils.metered
@pickle_login
def clone_many_users(self, **kwargs):
"""Gets a list of recent production users, iterates through the list
calling the self.clone_one_user() on each. """
# set the request URL, call the method from clone.py:
prod_api_url = API.config['PRODUCTION']['url']
users = clone.get_recent_users_from_api(
prod_api_url,
admin_login = kwargs['admin_login'],
admin_password = kwargs['admin_password'],
)
if len(users) == 0:
print('\n No recent users to clone! Exiting...\n')
sys.exit(255)
# now, iterate over users and do each on a thread
threads = []
for prod_user in users:
clone_thread = threading.Thread(
target=clone.get_one_user_from_api,
args = (prod_api_url,),
kwargs = {
'u_login': prod_user['login'],
'admin_login': kwargs['admin_login'],
'admin_password': kwargs['admin_password'],
'force': True
},
daemon=True,
)
threads.append(clone_thread)
clone_thread.start()
finished_threads = 0
def animate():
""" spin a little spinner. """
for c in itertools.cycle(['|', '/', '-', '\\']):
if finished_threads == len(threads):
break
sys.stdout.write('\r Cloning %s users... ' % len(users) + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\r Done! \n\n')
# summarize what we did:
for cloned_user in users:
print(" %s - %s " % (
cloned_user['_id']['$oid'],
cloned_user['login']
)
)
print()
t = threading.Thread(target=animate)
t.start()
for thread in threads:
thread.join()
finished_threads += 1
#
# methods for working with a local user
#
def work_with_user(self):
""" In which we perform user maintenance based on self.options. """
# first, see if self.options.user it looks like an email address
if (not ObjectId.is_valid(self.options.user)
and '@' in self.options.user):
u_oid = models.users.get_user_id_from_email(self.options.user)
um_object = UserManagementObject(oid=u_oid)
else:
um_object = UserManagementObject(oid=self.options.user)
um_object.print_header()
# now that we have a UserManagementObject initialized, see what it
# looks like the administrator wants to do with the user
if self.options.user_admin:
um_object.User.toggle_admin_status()
warn('Set admin status to %s!' % um_object.User.is_admin())
if isinstance(self.options.user_subscriber_level, int):
um_object.User.set_subscriber_level(
self.options.user_subscriber_level
)
warn('Set subscriber level to %s!' % (
um_object.User.get_subscriber_level()
))
if self.options.reset_pw:
default_pass = 'password'
pw_1 = input(" New password ['%s']: " % default_pass)
pw_2 = input(" New password (again): ")
if pw_1 != pw_2:
raise ValueError("New passwords must match!")
# handle defaulting
if pw_1 == '':
um_object.User.update_password(new_password=default_pass)
else:
um_object.User.update_password(new_password=pw_1)
print()
warn('Reset password for %s' % um_object.User.user['login'])
time.sleep(2)
# now that we've done whatever we're doing, dump the user to stdout to
# show the changes
um_object.dump()
# finally, if we're dumping settlements/survivors, do it now
if self.options.user_settlements:
um_object.dump_settlements()
if self.options.user_survivors:
um_object.dump_survivors()
#
# methods for working with a local settlement
#
def work_with_settlement(self):
""" In which we perform settlement maintenance based on self.options."""
# first, see if self.options.settlement is a valid OID
if not ObjectId.is_valid(self.options.settlement):
msg = "'%s' does not look like a valid OID!" % (
self.options.settlement
)
raise ValueError(msg)
sm_object = SettlementAdministrationObject(
oid=ObjectId(self.options.settlement)
)
# do operations first
if self.options.settlement_remove:
sm_object.Settlement.remove()
if self.options.settlement_unremove:
sm_object.Settlement.unremove()
# now print the current state stuff
sm_object.print_header()
if self.options.dump_settlement:
dump_settlement_to_cli(sm_object._id)
if self.options.dump_settlement_timeline:
dump_settlement_timeline_to_cli(sm_object._id)
if self.options.settlement_event_log:
sm_object.dump_event_log()
#
# methods for working with a local survivor
#
def work_with_survivor(self):
""" What it sounds like."""
# first, see if self.options.settlement is a valid OID
if not ObjectId.is_valid(self.options.survivor):
msg = "'%s' does not look like a valid OID!" % (
self.options.survivor
)
raise ValueError(msg)
admin_object = SurvivorAdministrationObject(
oid=ObjectId(self.options.survivor)
)
admin_object.print_header()
if self.options.dump_survivor:
print()
dump_survivor_to_cli(admin_object._id, detailed=True)
class UserManagementObject:
""" The UserManagementObject (UMO in releases < 1.0.0) is basically an
object with a bunch of goofy/one-off methods for doing user management
operations via CLI. """
def __init__(self, oid=None):
""" Initializes a user the normal way, and sets the initialized user
object as self.User, e.g. if you want to call User object methods.
Also has a bunch of its own methods that are not available via the
normal User object, and are admin-specific. """
if not ObjectId.is_valid(oid):
print("The user ID '%s' is not a valid Object ID." % (oid))
sys.exit(1)
self._id = ObjectId(oid)
self.User = models.users.User(_id=self._id)
self.login = self.User.user['login']
def dump(self):
""" Dump the user to stdout in a colorful, highly human readable sort
of way. """
serialized_user = self.User.serialize(dict)['user']
mini_repr = OrderedDict()
if 'admin' in self.User.user.keys():
mini_repr['admin'] = self.User.user['admin']
for time_attr in ['created_on', 'latest_sign_in', 'latest_activity',
'latest_authentication']:
try:
mini_repr[time_attr] = utils.get_time_elapsed_since(
serialized_user[time_attr], 'age'
)
except KeyError:
mini_repr[time_attr] = None
for attr in ['settlements_created', 'survivors_created']:
mini_repr[attr] = serialized_user[attr]
dump_doc_to_cli(mini_repr, gap_spaces=25)
print(" User subscriber status:")
dump_doc_to_cli(self.User.user['subscriber'])
if self.User.user['preferences'] != {}:
print(' User Preferences:')
dump_doc_to_cli(self.User.user['preferences'], gap_spaces=35)
def dump_settlements(self):
""" Dump a CLI summary of the user's settlements. """
settlements = utils.mdb.settlements.find({'created_by': self._id})
if settlements is not None:
ok = input(str(
"\n Press a key to dump %s settlements: " % settlements.count()
))
print("\n", Style.BOLD, "User settlements:", Style.END)
for s in settlements:
dump_settlement_to_cli(s['_id'])
def dump_survivors(self):
""" Dump a CLI summary of the user's survivors. """
survivors = utils.mdb.survivors.find({'created_by': self._id})
if survivors is not None:
ok = input(str(
"\n Press a key to dump %s survivors: " % survivors.count()
))
print("\n", Style.BOLD, "User survivors:", Style.END)
for s in survivors:
dump_survivor_to_cli(s['_id'])
def print_header(self):
""" Prints a little header to stdout that says who we're working on."""
print(
" Working with user", Style.YELLOW, self.login, Style.END, self._id,
)
class AssetAdministrationObject(object):
""" The base class used to initialize asset management objects, which are
objects that 'wrap' a regular asset object for administrative maintenance
purposes (i.e. stuff the normal API/app wouldn't do). """
def __init__(self, *args, **kwargs):
""" Initialize an asset administration object. """
# start it up
self.args = args
self.kwargs = kwargs
self.logger = utils.get_logger()
# set asset type and collection first
if 'Administration' not in type(self).__name__:
err = 'Objects must include "Administration" in their name!'
raise TypeError(err)
self.asset_type = type(self).__name__.split('Administration')[0]
self.collection = self.asset_type.lower() + 's'
# now use args/kwargs to sets self.Survivor, self.Settlement, etc.
self._id = ObjectId(self.kwargs['oid'])
self.model = getattr(models, self.collection) # models.settlements
self.asset_class = getattr(self.model, self.asset_type) # models.settlements.Settlement
setattr(
self,
self.asset_type,
self.asset_class(_id=self._id)
)
self.load()
# log successful initialization
        init_msg = 'Initialized administration object for %s' % (
getattr(self, self.asset_type),
)
self.logger.info(init_msg)
def load(self):
""" Sets attributes of self to be key/value pairs of the asset. """
record = getattr(
getattr(self, self.asset_type),
self.collection[:-1]
)
for k, v in record.items():
setattr(self, k, v)
def print_header(self):
""" Prints a generic header to the CLI that describes the asset. """
print(
" Working with '%s' record for " % self.collection,
Style.YELLOW,
self.name.strip(),
Style.END,
self._id,
)
creator = utils.mdb.users.find_one(
{'_id': self.created_by}
)
print(" (created by: %s %s)" % (creator['login'], creator['_id']))
class SurvivorAdministrationObject(AssetAdministrationObject):
""" Takes AssetAdministrationObject as its base class; has methods for
working with individual survivor records, etc. """
class SettlementAdministrationObject(AssetAdministrationObject):
""" Like the UserManagementObject (above), the SettlementManagementObject
is basically a special object used only by admin methods that allows the
user to perform settlement maintenance. """
def dump_event_log(self):
""" Uses the vanilla Settlement object's built-in get_event_log() method
to get the settlement's event log and print it in a CLI-friendly way."""
event_log = self.Settlement.get_event_log(query_sort=1)
for event in event_log:
dump_doc_to_cli(event, gap_spaces=35)
if __name__ == '__main__':
#stat()
ADMIN_OBJECT = AdministrationObject()
ADMIN_OBJECT.process_args()
|
client.py
|
import socket
import threading
def worker1(sock):
try:
while True:
s = input()+"\r\n"
sock.sendall(s.encode('utf-8'))
except:
pass
def worker2(sock):
try:
while True:
            # The network buffer size is 1024; receive the string sent by the server
data = sock.recv(1024)
if len(data)==0:
print("Broken pipe")
break
print(data)
except:
pass
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
        # Specify the server to connect to
sock.connect(('127.0.0.1', 8080))
        # Send messages to the server
        # Pass the worker1 function to a thread
t1 = threading.Thread(target=worker1,args=(sock,))
t2 = threading.Thread(target=worker2,args=(sock,))
        # Start the threads
t1.start()
t2.start()
print('started')
t2.join()
except:
pass
|
__init__.py
|
#####################################################################
# #
# /plugins/progress_bar/__init__.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
from Queue import Queue, Empty
else:
from queue import Queue, Empty
import logging
import os
import subprocess
import threading
import sys
import time
import numpy as np
from qtutils import UiLoader, inmain, inmain_decorator
from qtutils.qt import QtGui, QtWidgets, QtCore
import labscript_utils.h5_lock
import h5py
import labscript_utils.properties as properties
from labscript_utils.connections import ConnectionTable
from zprocess import Event, TimeoutError
from blacs.plugins import PLUGINS_DIR, callback
name = "Progress Bar"
module = "progress_bar" # should be folder name
logger = logging.getLogger('BLACS.plugin.%s'%module)
# The progress bar will update every UPDATE_INTERVAL seconds, or at the marker
# times, whichever is soonest after the last update:
UPDATE_INTERVAL = 0.02
BAR_MAX = 1000
def _ensure_str(s):
"""convert bytestrings and numpy strings to python strings"""
return s.decode() if isinstance(s, bytes) else str(s)
def black_has_good_contrast(r, g, b):
"""Return whether black text or white text would have better contrast on a
background of the given colour, according to W3C recommendations (see
https://www.w3.org/TR/WCAG20/). Return True for black or False for
white"""
cs = []
for c in r, g, b:
c = c / 255.0
if c <= 0.03928:
c = c/12.92
else:
c = ((c+0.055)/1.055) ** 2.4
cs.append(c)
r, g, b = cs
L = 0.2126 * r + 0.7152 * g + 0.0722 * b
return L > np.sqrt(1.05 * 0.05) - 0.05
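# Minimal usage sketch for black_has_good_contrast (illustrative colours, not
# from BLACS itself): a pure yellow background is light, so black text has
# better contrast, while dark navy needs white text.
#
#   black_has_good_contrast(255, 255, 0)  # -> True  (use black text)
#   black_has_good_contrast(0, 0, 128)    # -> False (use white text)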
class Plugin(object):
def __init__(self, initial_settings):
self.menu = None
self.notifications = {}
self.initial_settings = initial_settings
self.BLACS = None
self.command_queue = Queue()
self.master_pseudoclock = None
self.shot_start_time = None
self.stop_time = None
self.markers = None
self.waits = None
self.time_spent_waiting = None
self.next_wait_index = None
self.next_marker_index = None
self.bar_text_prefix = None
self.h5_filepath = None
self.wait_completed_events_supported = False
self.wait_completed = Event('wait_completed', type='wait')
self.mainloop_thread = threading.Thread(target=self.mainloop)
self.mainloop_thread.daemon = True
self.mainloop_thread.start()
def plugin_setup_complete(self, BLACS):
self.BLACS = BLACS
self.ui = UiLoader().load(os.path.join(PLUGINS_DIR, module, 'controls.ui'))
self.bar = self.ui.bar
self.style = QtWidgets.QStyleFactory.create('Fusion')
if self.style is None:
# If we're on Qt4, fall back to Plastique style:
self.style = QtWidgets.QStyleFactory.create('Plastique')
if self.style is None:
# Not sure what's up, but fall back to app's default style:
self.style = QtWidgets.QApplication.style()
self.bar.setStyle(self.style)
self.bar.setMaximum(BAR_MAX)
self.bar.setAlignment(QtCore.Qt.AlignCenter)
# Add our controls to the BLACS gui:
BLACS['ui'].queue_status_verticalLayout.insertWidget(0, self.ui)
# We need to know the name of the master pseudoclock so we can look up
# the duration of each shot:
self.master_pseudoclock = self.BLACS['experiment_queue'].master_pseudoclock
# Check if the wait monitor device, if any, supports wait completed events:
with h5py.File(self.BLACS['connection_table_h5file'], 'r') as f:
if 'waits' in f:
acq_device = f['waits'].attrs['wait_monitor_acquisition_device']
acq_device = _ensure_str(acq_device)
if acq_device:
props = properties.get(f, acq_device, 'connection_table_properties')
if props.get('wait_monitor_supports_wait_completed_events', False):
self.wait_completed_events_supported = True
self.ui.wait_warning.hide()
def get_save_data(self):
return {}
def get_callbacks(self):
return {'science_over': self.on_science_over,
'science_starting': self.on_science_starting}
@callback(priority=100)
def on_science_starting(self, h5_filepath):
# Tell the mainloop that we're starting a shot:
self.command_queue.put(('start', h5_filepath))
@callback(priority=5)
def on_science_over(self, h5_filepath):
# Tell the mainloop we're done with this shot:
self.command_queue.put(('stop', None))
@inmain_decorator(True)
def clear_bar(self):
self.bar.setEnabled(False)
self.bar.setFormat('No shot running')
self.bar.setValue(0)
self.bar.setPalette(self.style.standardPalette())
self.ui.wait_warning.hide()
def get_next_thing(self):
"""Figure out what's going to happen next: a wait, a time marker, or a
regular update. Return a string saying which, and a float saying how
long from now it will occur. If the thing has already happened but not
been taken into account by our processing yet, then return zero for
the time."""
if self.waits is not None and self.next_wait_index < len(self.waits):
next_wait_time = self.waits['time'][self.next_wait_index]
else:
next_wait_time = np.inf
if self.markers is not None and self.next_marker_index < len(self.markers):
next_marker_time = self.markers['time'][self.next_marker_index]
else:
next_marker_time = np.inf
assert self.shot_start_time is not None
assert self.time_spent_waiting is not None
labscript_time = time.time() - self.shot_start_time - self.time_spent_waiting
next_update_time = labscript_time + UPDATE_INTERVAL
if next_update_time < next_wait_time and next_update_time < next_marker_time:
return 'update', UPDATE_INTERVAL
elif next_wait_time < next_marker_time:
return 'wait', max(0, next_wait_time - labscript_time)
else:
return 'marker', max(0, next_marker_time - labscript_time)
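    # e.g. with UPDATE_INTERVAL = 0.02, if no wait or marker falls within the
    # next 20 ms this returns ('update', 0.02); if the next marker is only
    # 5 ms away and no wait comes sooner, it returns ('marker', 0.005)
    # (illustrative numbers).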
@inmain_decorator(True)
def update_bar_style(self, marker=False, wait=False, previous=False):
"""Update the bar's style to reflect the next marker or wait,
according to self.next_marker_index or self.next_wait_index. If
previous=True, instead update to reflect the current marker or
wait."""
assert not (marker and wait)
# Ignore requests to reflect markers or waits if there are no markers
# or waits in this shot:
marker = marker and self.markers is not None and len(self.markers) > 0
wait = wait and self.waits is not None and len(self.waits) > 0
if marker:
marker_index = self.next_marker_index
if previous:
marker_index -= 1
assert marker_index >= 0
label, _, color = self.markers[marker_index]
self.bar_text_prefix = '[%s] ' % _ensure_str(label)
r, g, b = color[0]
# Black is the default colour in labscript.add_time_marker.
# Don't change the bar colour if the marker colour is black.
if (r, g, b) != (0,0,0):
bar_color = QtGui.QColor(r, g, b)
if black_has_good_contrast(r, g, b):
highlight_text_color = QtCore.Qt.black
else:
highlight_text_color = QtCore.Qt.white
else:
bar_color = None
highlight_text_color = None
regular_text_color = None # use default
elif wait:
wait_index = self.next_wait_index
if previous:
wait_index -= 1
assert wait_index >= 0
label = self.waits[wait_index]['label']
self.bar_text_prefix = '-%s- ' % _ensure_str(label)
highlight_text_color = regular_text_color = QtGui.QColor(192, 0, 0)
bar_color = QtCore.Qt.gray
if marker or wait:
palette = QtGui.QPalette()
if bar_color is not None:
palette.setColor(QtGui.QPalette.Highlight, bar_color)
# Ensure the colour of the text on the filled in bit of the progress
# bar has good contrast:
if highlight_text_color is not None:
palette.setColor(QtGui.QPalette.HighlightedText, highlight_text_color)
if regular_text_color is not None:
palette.setColor(QtGui.QPalette.Text, regular_text_color)
self.bar.setPalette(palette)
else:
self.bar_text_prefix = None
# Default palette:
self.bar.setPalette(self.style.standardPalette())
@inmain_decorator(True)
def update_bar_value(self, marker=False, wait=False):
"""Update the progress bar with the current time elapsed. If marker or wait is
true, then use the exact time at which the next marker or wait is defined,
rather than the current time as returned by time.time()"""
thinspace = u'\u2009'
self.bar.setEnabled(True)
assert not (marker and wait)
if marker:
labscript_time = self.markers['time'][self.next_marker_index]
elif wait:
labscript_time = self.waits['time'][self.next_wait_index]
else:
labscript_time = time.time() - self.shot_start_time - self.time_spent_waiting
value = int(round(labscript_time / self.stop_time * BAR_MAX))
self.bar.setValue(value)
text = u'%.2f%ss / %.2f%ss (%%p%s%%)'
text = text % (labscript_time, thinspace, self.stop_time, thinspace, thinspace)
if self.bar_text_prefix is not None:
text = self.bar_text_prefix + text
self.bar.setFormat(text)
def _start(self, h5_filepath):
"""Called from the mainloop when starting a shot"""
self.h5_filepath = h5_filepath
# Get the stop time, any waits and any markers from the shot:
with h5py.File(h5_filepath, 'r') as f:
props = properties.get(f, self.master_pseudoclock, 'device_properties')
self.stop_time = props['stop_time']
try:
self.markers = f['time_markers'][:]
self.markers.sort(order=(bytes if PY2 else str)('time'))
except KeyError:
self.markers = None
try:
self.waits = f['waits'][:]
self.waits.sort(order=(bytes if PY2 else str)('time'))
except KeyError:
self.waits = None
self.shot_start_time = time.time()
self.time_spent_waiting = 0
self.next_marker_index = 0
self.next_wait_index = 0
def _stop(self):
"""Called from the mainloop when ending a shot"""
self.h5_filepath = None
self.shot_start_time = None
self.stop_time = None
self.markers = None
self.waits = None
self.time_spent_waiting = None
self.next_wait_index = None
self.next_marker_index = None
self.bar_text_prefix = None
def mainloop(self):
running = False
self.clear_bar()
while True:
try:
if running:
# How long until the next thing of interest occurs, and
# what is it? It can be either a wait, a marker, or a
# regular update.
next_thing, timeout = self.get_next_thing()
try:
command, _ = self.command_queue.get(timeout=timeout)
except Empty:
if next_thing == 'update':
self.update_bar_value()
if next_thing == 'marker':
self.update_bar_style(marker=True)
self.update_bar_value(marker=True)
self.next_marker_index += 1
elif next_thing == 'wait':
wait_start_time = time.time()
self.update_bar_style(wait=True)
self.update_bar_value(wait=True)
self.next_wait_index += 1
# wait for the wait to complete, but abandon
# processing if the command queue is non-empty,
# i.e. if a stop command is sent.
while self.command_queue.empty():
try:
# Wait for only 0.1 sec at a time, so that
# we can check if the queue is empty in between:
self.wait_completed.wait(self.h5_filepath, timeout=0.1)
except TimeoutError:
# Only wait for wait completed events if the wait
# monitor device supports them. Otherwise, skip
# after this first timeout, and it will just look
# like the wait had 0.1 sec duration.
if self.wait_completed_events_supported:
# The wait is still in progress:
continue
# The wait completed (or completion events are not
# supported):
self.time_spent_waiting += time.time() - wait_start_time
# Set the bar style back to whatever the
# previous marker was, if any:
self.update_bar_style(marker=True, previous=True)
self.update_bar_value()
break
continue
else:
command, h5_filepath = self.command_queue.get()
if command == 'close':
break
elif command == 'start':
assert not running
running = True
self._start(h5_filepath)
self.update_bar_value()
if (
self.waits is not None
and len(self.waits) > 0
and not self.wait_completed_events_supported
):
inmain(self.ui.wait_warning.show)
elif command == 'stop':
assert running
self.clear_bar()
running = False
self._stop()
else:
raise ValueError(command)
except Exception:
logger.exception("Exception in mainloop, ignoring.")
# Stop processing of the current shot, if any.
self.clear_bar()
inmain(self.bar.setFormat, "Error in progress bar plugin")
running = False
self._stop()
def close(self):
self.command_queue.put(('close', None))
self.mainloop_thread.join()
# The rest of these are boilerplate:
def get_menu_class(self):
return None
def get_notification_classes(self):
return []
def get_setting_classes(self):
return []
def set_menu_instance(self, menu):
self.menu = menu
def set_notification_instances(self, notifications):
self.notifications = notifications
|
learn.py
|
#!/usr/bin/python3
import json
import csv
from random import shuffle
import warnings
import pickle
import gzip
import operator
import time
import logging
import math
from threading import Thread
import functools
# create the 'learn' logger with file and console handlers
logger = logging.getLogger('learn')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('learn.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - [%(name)s/%(funcName)s] - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
import numpy
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import cluster, mixture
from sklearn.neighbors import kneighbors_graph
from naive_bayes import ExtendedNaiveBayes
from naive_bayes2 import ExtendedNaiveBayes2
def timeout(timeout):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = [Exception('function [%s] timeout [%s seconds] exceeded!' % (func.__name__, timeout))]
def newFunc():
try:
res[0] = func(*args, **kwargs)
except Exception as e:
res[0] = e
t = Thread(target=newFunc)
t.daemon = True
try:
t.start()
t.join(timeout)
except Exception as je:
raise je
ret = res[0]
if isinstance(ret, BaseException):
raise ret
return ret
return wrapper
return deco
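# Minimal usage sketch for the timeout decorator above (hypothetical function
# name, not part of this module): if the wrapped call does not finish within
# the given number of seconds, the pre-built Exception stored in res[0] is
# raised by the wrapper.
#
#   @timeout(2)
#   def slow_op():
#       time.sleep(5)
#       return "done"
#
#   slow_op()  # raises Exception('function [slow_op] timeout [2 seconds] exceeded!')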
class AI(object):
def __init__(self, family, path_to_data):
self.logger = logging.getLogger('learn.AI')
self.naming = {'from': {}, 'to': {}}
self.family = family
self.path_to_data = path_to_data
def classify(self, sensor_data):
header = self.header[1:]
is_unknown = True
csv_data = numpy.zeros(len(header))
for sensorType in sensor_data['s']:
for sensor in sensor_data['s'][sensorType]:
sensorName = sensorType + "-" + sensor
if sensorName in header:
is_unknown = False
csv_data[header.index(sensorName)] = sensor_data[
's'][sensorType][sensor]
payload = self.do_classification(header, csv_data)
payload['is_unknown'] = is_unknown
return payload
def do_classification(self, header, csv_data):
"""
header = ['wifi-a', 'wifi-b']
csv_data = [-67 0]
"""
t = time.time()
payload = {'location_names': self.naming['to'], 'predictions': []}
for name in self.algorithms:
try:
prediction = self.algorithms[
name].predict_proba(csv_data.reshape(1, -1))
except Exception as e:
logger.error(str(e))
continue
predict = {}
for i, pred in enumerate(prediction[0]):
predict[i] = pred
predict_payload = {'name': name,
'locations': [], 'probabilities': []}
badValue = False
for tup in sorted(predict.items(), key=operator.itemgetter(1), reverse=True):
predict_payload['locations'].append(str(tup[0]))
predict_payload['probabilities'].append(
round(float(tup[1]), 2))
if math.isnan(tup[1]):
badValue = True
break
if badValue:
continue
payload['predictions'].append(predict_payload)
# try:
# t2 = time.time()
# name = "Extended Naive Bayes"
# clf = ExtendedNaiveBayes(self.family,path_to_data=self.path_to_data)
# predictions = clf.predict_proba(header,csv_data)
# predict_payload = {'name': name,'locations': [], 'probabilities': []}
# for tup in predictions:
# predict_payload['locations'].append(str(self.naming['from'][tup[0]]))
# predict_payload['probabilities'].append(round(tup[1],2))
# payload['predictions'].append(predict_payload)
# self.logger.debug("{} {:d} ms".format(name,int(1000 * (t2 - time.time()))))
# except Exception as e:
# self.logger.error(str(e))
# try:
# t2 = time.time()
# name = "Extended Naive Bayes2"
# clf = ExtendedNaiveBayes2(self.family, path_to_data=self.path_to_data)
# predictions = clf.predict_proba(header, csv_data)
# predict_payload = {'name': name, 'locations': [], 'probabilities': []}
# for tup in predictions:
# predict_payload['locations'].append(
# str(self.naming['from'][tup[0]]))
# predict_payload['probabilities'].append(round(tup[1], 2))
# payload['predictions'].append(predict_payload)
# self.logger.debug("{} {:d} ms".format(
# name, int(1000 * (t2 - time.time()))))
# except Exception as e:
# self.logger.error(str(e))
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
return payload
@timeout(10)
def train(self, clf, x, y):
return clf.fit(x, y)
def learn(self, fname):
t = time.time()
# load CSV file
self.header = []
rows = []
naming_num = 0
with open(fname, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(reader):
if i == 0:
self.header = row
else:
for j, val in enumerate(row):
if val == '':
row[j] = 0
continue
try:
row[j] = float(val)
except:
if val not in self.naming['from']:
self.naming['from'][val] = naming_num
self.naming['to'][naming_num] = val
naming_num += 1
row[j] = self.naming['from'][val]
rows.append(row)
# first column in row is the classification, Y
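        # e.g. with a header like ['location', 'wifi-a', 'wifi-b'] (column
        # names illustrative), a parsed row might end up as [0, -67.0, 0]
        # after the location name has been mapped to an integer via
        # self.naming.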
y = numpy.zeros(len(rows))
x = numpy.zeros((len(rows), len(rows[0]) - 1))
# shuffle it up for training
record_range = list(range(len(rows)))
shuffle(record_range)
for i in record_range:
y[i] = rows[i][0]
x[i, :] = numpy.array(rows[i][1:])
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Decision Tree",
"Random Forest",
"Neural Net",
"AdaBoost",
"Naive Bayes",
"QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025, probability=True),
SVC(gamma=2, C=1, probability=True),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(
max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
self.algorithms = {}
# split_for_learning = int(0.70 * len(y))
for name, clf in zip(names, classifiers):
t2 = time.time()
self.logger.debug("learning {}".format(name))
try:
self.algorithms[name] = self.train(clf, x, y)
# score = self.algorithms[name].score(x,y)
# logger.debug(name, score)
self.logger.debug("learned {}, {:d} ms".format(
name, int(1000 * (t2 - time.time()))))
except Exception as e:
self.logger.error("{} {}".format(name,str(e)))
# t2 = time.time()
# name = "Extended Naive Bayes"
# clf = ExtendedNaiveBayes(self.family, path_to_data=self.path_to_data)
# try:
# clf.fit(fname)
# self.logger.debug("learned {}, {:d} ms".format(
# name, int(1000 * (t2 - time.time()))))
# except Exception as e:
# self.logger.error(str(e))
# t2 = time.time()
# name = "Extended Naive Bayes2"
# clf = ExtendedNaiveBayes2(self.family, path_to_data=self.path_to_data)
# try:
# clf.fit(fname)
# self.logger.debug("learned {}, {:d} ms".format(
# name, int(1000 * (t2 - time.time()))))
# except Exception as e:
# self.logger.error(str(e))
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def save(self, save_file):
t = time.time()
f = gzip.open(save_file, 'wb')
pickle.dump(self.header, f)
pickle.dump(self.naming, f)
pickle.dump(self.algorithms, f)
pickle.dump(self.family, f)
f.close()
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def load(self, save_file):
t = time.time()
f = gzip.open(save_file, 'rb')
self.header = pickle.load(f)
self.naming = pickle.load(f)
self.algorithms = pickle.load(f)
self.family = pickle.load(f)
f.close()
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def do():
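    # NOTE: scratch/experimental routine. AI() actually requires the family and
    # path_to_data arguments, load() requires a saved model file, and ai.x /
    # ai.y are never populated by load(), so this will not run as written.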
ai = AI()
ai.load()
# ai.learn()
params = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
bandwidth = cluster.estimate_bandwidth(ai.x, quantile=params['quantile'])
connectivity = kneighbors_graph(
ai.x, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
try:
algorithm.fit(ai.x)
except:
continue
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(numpy.int)
else:
y_pred = algorithm.predict(ai.x)
if max(y_pred) > 3:
continue
known_groups = {}
for i, group in enumerate(ai.y):
group = int(group)
if group not in known_groups:
known_groups[group] = []
known_groups[group].append(i)
guessed_groups = {}
for i, group in enumerate(y_pred):
if group not in guessed_groups:
guessed_groups[group] = []
guessed_groups[group].append(i)
for k in known_groups:
for g in guessed_groups:
print(
k, g, len(set(known_groups[k]).intersection(guessed_groups[g])))
# ai = AI()
# ai.learn("../testing/testdb.csv")
# ai.save("dGVzdGRi.find3.ai")
# ai.load("dGVzdGRi.find3.ai")
# a = json.load(open('../testing/testdb_single_rec.json'))
# classified = ai.classify(a)
# print(json.dumps(classified,indent=2))
|
RunnerUtils.py
|
#!/usr/bin/env python
"""
Job runner utils for both SGE jobs and local jobs.
"""
import subprocess
import logging
import time
import os
from multiprocessing.pool import ThreadPool
from pbcore.util.Process import backticks
from pbtranscript.ClusterOptions import SgeOptions
__author__ = 'etseng|[email protected]'
class SgeTimeOutException(Exception):
"""
    SGE time-out exception, which can be raised
    when an SGE job times out.
"""
def __init__(self, errmsg):
        super(SgeTimeOutException, self).__init__(errmsg)
def write_cmd_to_script(cmd, script):
"""
Write a cmd or a list of cmds to a script file.
Parameters:
cmd - a cmd string or a list of cmds
script - a script file to save cmd/cmds
"""
with open(script, 'w') as writer:
writer.write("#!/bin/bash\n")
if isinstance(cmd, str):
writer.write(cmd + '\n')
elif isinstance(cmd, list):
writer.write("\n".join(cmd))
else:
assert False
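# Minimal usage sketch (hypothetical paths and commands): both calls produce a
# bash script that starts with '#!/bin/bash'.
#
#   write_cmd_to_script("echo hello", "/tmp/job_0.sh")
#   write_cmd_to_script(["echo step1", "echo step2"], "/tmp/job_1.sh")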
def local_job_runner(cmds_list, num_threads, throw_error=True):
"""
Execute a list of cmds locally using thread pool with at most
num_threads threads, wait for all jobs to finish before exit.
If throw_error is True, when any job failed, raise RuntimeError.
If throw_error is False, return a list of cmds that failed.
Parameters:
cmds_list - cmds that will be executed in ThreadPool
num_threads - number of threads that will be used in the ThreadPool
    throw_error - whether or not to raise RuntimeError when any cmd fails.
"""
    run_cmd_in_shell = lambda x: backticks(x, merge_stderr=True)
    pool = ThreadPool(processes=num_threads)
    try:
        rets = pool.map(run_cmd_in_shell, cmds_list)
    finally:
        # always clean up the pool; any exception from map propagates
        pool.close()
        pool.join()
failed_cmds = [cmds_list[i] for i in range(0, len(cmds_list)) if rets[i][1] != 0]
failed_cmds_out = [rets[i][0] for i in range(0, len(cmds_list)) if rets[i][1] != 0]
if throw_error and len(failed_cmds) > 0:
errmsg = "\n".join(["CMD failed: %s, %s" % (cmd, out)
for (cmd, out) in zip(failed_cmds, failed_cmds_out)])
raise RuntimeError(errmsg)
else:
return failed_cmds
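# Minimal usage sketch (hypothetical commands): run two shell commands with at
# most two worker threads; with throw_error=False, the commands that exited
# non-zero are returned instead of raising RuntimeError.
#
#   failed = local_job_runner(["echo ok", "false"], num_threads=2,
#                             throw_error=False)
#   # failed == ["false"], since 'false' exits with a non-zero status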
def get_active_sge_jobs():
"""Return a dict of active sge job ids and their status by
calling qstat.
output - {jid: status}, e.g., {'199':'r', '200':'hqw'}
"""
try:
stuff = os.popen("qstat").read().strip().split('\n')
return dict({x.split()[0]: x.split()[4] for x in stuff[2:]})
except Exception as e:
raise RuntimeError("Unable to get active qsub jobs.", str(e))
def sge_submit(qsub_cmd, qsub_try_times=1):
"""
Submit qsub_cmd to sge and return sge job id as string.
    Keep trying for at most {qsub_try_times} times until qsub succeeds.
    By default, there is no retry.
Parameters:
qsub_cmd - a qsub cmd starting with 'qsub'
qsub_try_times - maximum try times
"""
assert qsub_cmd.startswith("qsub")
try_times = 1
while try_times <= qsub_try_times:
out, code, dummy_msg = backticks(qsub_cmd)
if code == 0: # succeeded, break
# Your job 596028 ("a.sh") has been submitted
return str(out).split()[2]
else:
# failed, sleep for a little, try again
time.sleep(5)
try_times += 1
raise RuntimeError("Unable to qsub CMD: {cmd}. Abort!:"
.format(cmd=qsub_cmd))
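# Minimal usage sketch (hypothetical script name): submit a script and retry
# qsub up to three times before giving up.
#
#   jid = sge_submit("qsub a.sh", qsub_try_times=3)
#   # jid is parsed from output like: Your job 596028 ("a.sh") has been submitted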
#def wait_for_sge_jobs(cmd, jids, timeout):
# """
# This replaces the original qsub -sync y -hold_jid j1,j2..... command
# which can still be hung if certain jobs got stuck.
#
# If timeout occurs, simply qdel all jids (ignoring whether they exist or not)
# and let the main function that calls it handle what to do
#
# Parameters:
# cmd - command waiting for
# jids - job ids that we are waiting for
# timeout - time out in seconds, delete all input jobs.
# """
# p = multiprocessing.Process(target=sge_submit, args=((cmd, 1),))
# p.start()
# p.join(timeout)
# if p.is_alive(): # timed out
# active_jids = get_active_sge_jobs().keys()
# while len(active_jids) > 0:
# for jid in active_jids:
# kill_cmd = "qdel " + str(jid)
# backticks(kill_cmd) # don't care whether it worked
# time.sleep(3) # wait for qdel to take effect...
# active_jids = get_active_sge_jids().keys()
# raise SgeException("TIMEOUT")
def kill_sge_jobs(jids):
"""Kill given sge jobs."""
for jid in jids:
kill_cmd = "qdel {jid}".format(jid=jid)
backticks(kill_cmd) # don't care whether it worked.
    time.sleep(3)  # wait for qdel to take effect...
def wait_for_sge_jobs(jids, wait_timeout=None, run_timeout=None):
"""
Wait for all sge job ids {jids} to complete before exiting.
Return sge job ids that have been killed by qdel.
If wait_timeout is set, qdel all jobs regardless job status after
{wait_timeout} seconds have passed.
    If wait_timeout is None, jobs can be queued (qw) or held for a long time
    when the cluster is busy. If sge died and restarted, jobs will
    no longer be active and wait_for_sge_jobs should be OK to exit;
    however, in this case, upstream callers may not be aware that
    the jobs have not completed.
If run_timeout is set, qdel a job after it has been running for
{run_timeout} seconds.
If run_timeout is None, jobs can run forever unless wait_timeout is set.
Note that if both wait_timeout and run_timeout are set, qdel a job
when the earliest time out is reached.
Parameters:
jids - sge job ids that we are waiting for
wait_timeout - maximum time in seconds waiting for sge jobs,
regardless of their statuses. qdel it otherwise.
If is None, no cap.
run_timeout - maximum time in seconds that a sge job can be running,
not counting qw or hold time. qdel it otherwise.
If is None, no cap.
"""
count = 0
check_sge_every_n_seconds = 10 # check sge every n seconds.
time_passed = 0
runtime_passed = dict({jid: 0 for jid in jids})
killed_jobs = [] # jobs that have been killed.
while True:
active_d = get_active_sge_jobs()
not_done_jids = list(set(jids).intersection(set(active_d.keys())))
if len(not_done_jids) != 0:
# some sge jobs are still running or qw, or held
time.sleep(check_sge_every_n_seconds)
time_passed += check_sge_every_n_seconds
count += 1
if count % 100 == 0:
logging.debug("Waiting for sge job to complete: %s.",
",".join(not_done_jids))
if wait_timeout is not None and time_passed >= wait_timeout:
kill_sge_jobs(jids=not_done_jids)
killed_jobs.extend(not_done_jids)
break
if run_timeout is not None:
# update runtime_passed
for jid in not_done_jids:
if active_d[jid].startswith('r'):
runtime_passed[jid] += check_sge_every_n_seconds
to_kill_jids = [jid for jid in not_done_jids
if runtime_passed[jid] >= run_timeout]
kill_sge_jobs(jids=to_kill_jids)
killed_jobs.extend(to_kill_jids)
else:
break
return list(set(killed_jobs))
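# Minimal usage sketch (hypothetical job ids): wait for two jobs, qdel anything
# still alive after an hour, and qdel any single job that has been in the
# running ('r') state for more than 10 minutes.
#
#   killed = wait_for_sge_jobs(['199', '200'], wait_timeout=3600, run_timeout=600)
#   # killed lists the job ids that had to be qdel-ed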
def sge_job_runner(cmds_list, script_files,
#done_script,
num_threads_per_job, sge_opts, qsub_try_times=3,
wait_timeout=600, run_timeout=600,
rescue=None, rescue_times=3):
"""
Write commands in cmds_list each to a file in script_files.
Qsub all scripts to sge, then qsub done_script which depends
on all previously submitted jobs to complete before it starts.
Parameters:
cmds_list - a list of commands to run
script_files - a list of script_files each saving a command in cmds_list
    #done_script - run this script locally when all sge jobs are complete.
num_threads_per_job - number of cores required for each job
(e.g., qsub -pe smp {n})
sge_opts - sge options to submit sge jobs.
    qsub_try_times - maximum number of times to retry if qsub fails
wait_timeout - maximum time in seconds passed before qdel all jobs.
run_timeout - maximum time in seconds allowing a sge job to be running
before qdel this job
rescue - whether or not to rescue a qdel-ed job.
None - no rescue
locally - yes, run it locally exactly once
    sge - yes, run it through sge, try multiple times until it succeeds
rescue_times - maximum times of rescuing a qdel-ed job.
ToDo:
(1) if SGE fails at qsub, -- resubmit? wait? run local?
(2) add in ways to monitor if certain qsub jobs died or hung --- resubmit? kill? run local?
"""
assert isinstance(sge_opts, SgeOptions)
if len(cmds_list) != len(script_files):
raise ValueError("Number of commands and script files "
"passed to sge_job_runner must be the same.")
jids = []
jids_to_cmds = {}
jids_to_scripts = {}
for cmd, script in zip(cmds_list, script_files):
if run_timeout is not None and not cmd.startswith("timeout"):
cmd = "timeout %d %s" % (run_timeout, cmd)
write_cmd_to_script(cmd=cmd, script=script)
qsub_cmd = sge_opts.qsub_cmd(script=script, num_threads=num_threads_per_job,
elog=script+".elog", olog=script+".olog")
jid = sge_submit(qsub_cmd=qsub_cmd, qsub_try_times=qsub_try_times)
jids.append(jid)
jids_to_cmds[jid] = cmd
jids_to_scripts[jid] = script
# We used to submit a done job which waits for all previous submitted
# sge jobs to complete using 'qsub -hold_jid'. This is deprecated because:
# 1. some non-SGE clusters may not support -hold_jid option
    # 2. we prefer a timeout schema. Sometimes one job may be indefinitely
    #    stuck on a node (because that node is zombied or used up by another job);
    #    in this case, the job simply sits there FOREVER. We would rather kill
    #    off the qsub jobs that go over the timeout and retry.
#
# Replace 'qsub -hold_jid' by wait_for_sge_jobs with timeout.
killed_jobs = wait_for_sge_jobs(jids=jids, wait_timeout=wait_timeout,
run_timeout=run_timeout)
killed_cmds = [jids_to_cmds[jid] for jid in killed_jobs]
killed_scripts = [jids_to_scripts[jid] for jid in killed_jobs]
if rescue is None or rescue_times <= 0:
return zip(killed_cmds, killed_scripts)
elif rescue == "locally": # retry at most once if running locally
ret = []
for killed_cmd, killed_script in zip(killed_cmds, killed_scripts):
failed = (len(local_job_runner(cmds_list=[killed_cmd],
num_threads=num_threads_per_job,
throw_error=False)) != 0)
if failed:
ret.append((killed_cmd, killed_script))
return ret
elif rescue == "sge":
        # resubmit the killed commands/scripts through sge, decrementing rescue_times
        return sge_job_runner(cmds_list=killed_cmds, script_files=killed_scripts,
                              num_threads_per_job=num_threads_per_job,
                              sge_opts=sge_opts, qsub_try_times=qsub_try_times,
                              wait_timeout=wait_timeout, run_timeout=run_timeout,
                              rescue=rescue, rescue_times=(rescue_times-1))
else:
raise ValueError("Unable to recognize rescue type {r}.".format(r=rescue))
|
test_pipeline_process.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import traceback
from django.test import TestCase
from pipeline.django_signal_valve import valve
from pipeline.engine import states, signals, exceptions
from pipeline.engine.models import Status
from pipeline.engine.utils import Stack
from pipeline.engine.models.core import PipelineProcess, ProcessSnapshot, SubProcessRelationship
from ..mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
valve.unload_valve_function()
class TestPipelineProcess(TestCase):
def test_prepare_for_pipeline(self):
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
self.assertEqual(len(process.id), 32)
self.assertEqual(process.root_pipeline_id, pipeline.id)
self.assertEqual(process.current_node_id, pipeline.start_event.id)
self.assertIsNotNone(process.snapshot)
self.assertEqual(process.top_pipeline.id, pipeline.id)
def test_fork_child(self):
context = MockContext()
context.clear_change_keys = MagicMock()
pipeline = PipelineObject(context=context)
current_node_id = uniqid()
destination_id = uniqid()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
child = PipelineProcess.objects.fork_child(
parent=process,
current_node_id=current_node_id,
destination_id=destination_id
)
self.assertEqual(len(child.id), 32)
self.assertEqual(process.root_pipeline_id, child.root_pipeline_id)
self.assertEqual(process.pipeline_stack, child.pipeline_stack)
self.assertEqual(process.children, child.children)
self.assertEqual(process.root_pipeline.id, child.root_pipeline.id)
self.assertEqual(process.subprocess_stack, child.subprocess_stack)
self.assertEqual(process.id, child.parent_id)
self.assertEqual(child.current_node_id, current_node_id)
self.assertEqual(child.destination_id, destination_id)
self.assertEqual(context.clear_change_keys.call_count, 1)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_process_ready(self):
from pipeline.django_signal_valve.valve import send
process_id = uniqid()
current_node_id = uniqid()
PipelineProcess.objects.process_ready(process_id)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=None,
call_from_child=False)
PipelineProcess.objects.process_ready(process_id, current_node_id, False)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=current_node_id,
call_from_child=False)
PipelineProcess.objects.process_ready(process_id, current_node_id, True)
send.assert_called_with(
signals,
'process_ready',
sender=PipelineProcess,
process_id=process_id,
current_node_id=current_node_id,
call_from_child=True)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_batch_process_ready(self):
from pipeline.django_signal_valve.valve import send
process_id_list = [uniqid(), uniqid(), uniqid()]
pipeline_id = uniqid()
PipelineProcess.objects.batch_process_ready(process_id_list, pipeline_id)
send.assert_called_with(
signals,
'batch_process_ready',
sender=PipelineProcess,
process_id_list=process_id_list,
pipeline_id=pipeline_id
)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_child_process_ready(self):
from pipeline.django_signal_valve.valve import send
child_id = uniqid()
PipelineProcess.objects.child_process_ready(child_id)
send.assert_called_with(
signals,
'child_process_ready',
sender=PipelineProcess,
child_id=child_id
)
def test_properties(self):
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
self.assertEqual(process.pipeline_stack, pipeline_stack)
self.assertEqual(process.children, children)
self.assertEqual(process.root_pipeline, root_pipeline)
self.assertEqual(process.top_pipeline, pipeline_stack.top())
self.assertEqual(process.subprocess_stack, subprocess_stack)
def test_push_pipeline(self):
pipeline = 'pipeline_%s' % uniqid()
subproc_pipeline = PipelineObject()
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
process.id = uniqid()
process.push_pipeline(pipeline, is_subprocess=False)
self.assertEqual(process.top_pipeline, pipeline)
process.push_pipeline(subproc_pipeline, is_subprocess=True)
self.assertEqual(process.top_pipeline, subproc_pipeline)
self.assertTrue(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists())
def test_pop_pipeline(self):
subproc_pipeline = PipelineObject()
process = PipelineProcess.objects.create()
pipeline_stack = Stack(['pipeline1', 'pipeline2'])
subprocess_stack = Stack(['subprocess1', 'subprocess2'])
children = ['child1', 'child2']
root_pipeline = 'root_pipeline'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=pipeline_stack,
children=children,
root_pipeline=root_pipeline,
subprocess_stack=subprocess_stack
)
process.snapshot = mock_snapshot
process.id = uniqid()
process.push_pipeline(subproc_pipeline, is_subprocess=True)
self.assertEqual(process.top_pipeline, subproc_pipeline)
self.assertTrue(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists())
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline.id, subproc_pipeline.id)
self.assertFalse(
SubProcessRelationship.objects.filter(subprocess_id=subproc_pipeline.id, process_id=process.id).exists()
)
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline, 'pipeline2')
pop_pipeline = process.pop_pipeline()
self.assertEqual(pop_pipeline, 'pipeline1')
def test_join(self):
children = [IdentifyObject(), IdentifyObject(), IdentifyObject()]
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline='root_pipeline',
subprocess_stack=Stack()
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.join(children)
self.assertEqual(process.need_ack, len(children))
for i in range(len(children)):
self.assertEqual(process.children[i], children[i].id)
def test_root_sleep_check(self):
def return_suspended(*args, **kwargs):
return states.SUSPENDED
def return_revoked(*args, **kwargs):
return states.REVOKED
def return_blocked(*args, **kwargs):
return states.BLOCKED
another_status = MagicMock()
status = [states.CREATED, states.READY, states.RUNNING, states.FINISHED, states.FAILED]
another_status.side_effect = status
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack()
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
self.assertEqual(process.root_sleep_check(), (True, states.SUSPENDED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_revoked):
self.assertEqual(process.root_sleep_check(), (True, states.REVOKED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_blocked):
self.assertEqual(process.root_sleep_check(), (True, states.BLOCKED))
process.parent_id = 'parent_id'
self.assertEqual(process.root_sleep_check(), (False, states.BLOCKED))
with mock.patch(PIPELINE_STATUS_STATE_FOR, another_status):
for s in status:
self.assertEqual(process.root_sleep_check(), (False, s))
def test_subproc_sleep_check(self):
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([1, 2, 3, 4])
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
def return_all_running(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_one_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.SUSPENDED),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_first_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.SUSPENDED),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.RUNNING)
]
def return_last_suspended(*args, **kwargs):
return [
StatusObject(id=1, state=states.RUNNING),
StatusObject(id=2, state=states.RUNNING),
StatusObject(id=3, state=states.RUNNING),
StatusObject(id=4, state=states.SUSPENDED)
]
with mock.patch(PIPELINE_STATUS_FILTER, return_all_running):
self.assertEqual(process.subproc_sleep_check(), (False, [1, 2, 3, 4]))
with mock.patch(PIPELINE_STATUS_FILTER, return_one_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, [1]))
with mock.patch(PIPELINE_STATUS_FILTER, return_first_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, []))
with mock.patch(PIPELINE_STATUS_FILTER, return_last_suspended):
self.assertEqual(process.subproc_sleep_check(), (True, [1, 2, 3]))
@patch(PIPELINE_CELERYTASK_UNBIND, MagicMock())
def test_freeze(self):
from pipeline.engine.models import ProcessCeleryTask
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
self.assertFalse(process.is_frozen)
process.freeze()
self.assertTrue(process.is_frozen)
process.refresh_from_db()
self.assertTrue(process.is_frozen)
ProcessCeleryTask.objects.unbind.assert_called_with(process.id)
@patch(SIGNAL_VALVE_SEND, MagicMock())
def test_unfreeze(self):
from pipeline.django_signal_valve.valve import send
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
process.freeze()
process.unfreeze()
self.assertFalse(process.is_frozen)
process.refresh_from_db()
self.assertFalse(process.is_frozen)
send.assert_called_with(
signals,
'process_unfreeze',
sender=PipelineProcess,
process_id=process.id
)
@patch(PIPELINE_PROCESS_ADJUST_STATUS, MagicMock())
@patch(PIPELINE_CELERYTASK_UNBIND, MagicMock())
def test_sleep(self):
from pipeline.engine.models import ProcessCeleryTask
pipeline = PipelineObject()
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
process.sleep(do_not_save=True, adjust_status=True)
process.adjust_status.assert_called_with(None)
ProcessCeleryTask.objects.unbind.assert_not_called()
process.adjust_status.reset_mock()
process.sleep(do_not_save=True, adjust_status=True, adjust_scope=[1, 2, 3, 4])
process.adjust_status.assert_called_with([1, 2, 3, 4])
ProcessCeleryTask.objects.unbind.assert_not_called()
process.adjust_status.reset_mock()
process.sleep(do_not_save=False, adjust_status=False)
process.adjust_status.assert_not_called()
self.assertTrue(process.sleep)
ProcessCeleryTask.objects.unbind.assert_called_with(process.id)
with mock.patch(PIPELINE_PROCESS_CHILD_PROCESS_READY, MagicMock()):
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
process.sleep(do_not_save=False, adjust_status=False)
PipelineProcess.objects.child_process_ready.assert_has_calls([
mock.call(1),
mock.call(2),
mock.call(3),
mock.call(4)
])
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
@patch(PIPELINE_STATUS_TRANSIT, MagicMock())
def test_adjust_status(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(id='root_pipeline_id'),
subprocess_stack=Stack([1, 2, 3, 4])
)
process.snapshot = mock_snapshot
process.current_node_id = 'current_node_id'
def return_suspended_for_node(id, may_not_exist=False):
if id == 'current_node_id':
return states.SUSPENDED
def return_failed_for_node(id, may_not_exist=False):
if id == 'current_node_id':
return states.FAILED
def return_suspended_for_root_pipeline(id, may_not_exist=False):
if id == 'root_pipeline_id':
return states.SUSPENDED
def return_none_for_node(*args, **kwargs):
return None
def return_empty_list_for_subproc(subprocess_stack):
return []
def return_all_running_for_subproc(subprocess_stack):
return [states.RUNNING, states.RUNNING, states.RUNNING, states.RUNNING]
def return_last_suspended_for_subproc(subprocess_stack):
return [states.RUNNING, states.RUNNING, states.RUNNING, states.SUSPENDED]
def return_one_suspended_for_subproc(subprocess_stack):
return [states.RUNNING, states.SUSPENDED, states.RUNNING, states.RUNNING]
node_state_possibility = [return_suspended_for_node, return_failed_for_node]
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_empty_list_for_subproc):
for case in node_state_possibility:
with mock.patch(PIPELINE_STATUS_STATE_FOR, case):
process.adjust_status()
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3, 4],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.transit.assert_called_with(
'root_pipeline_id',
to_state=states.BLOCKED,
is_pipeline=True
)
Status.objects.batch_transit.reset_mock()
Status.objects.transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended_for_root_pipeline):
process.adjust_status()
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3, 4],
state=states.SUSPENDED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_none_for_node):
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_all_running_for_subproc):
process.adjust_status()
Status.objects.batch_transit.assert_not_called()
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_last_suspended_for_subproc):
process.adjust_status(adjust_scope=[1, 2, 3])
Status.objects.batch_transit.assert_called_with(
id_list=[1, 2, 3],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
with mock.patch(PIPELINE_STATUS_STATES_FOR, return_one_suspended_for_subproc):
process.adjust_status(adjust_scope=[1])
Status.objects.batch_transit.assert_called_with(
id_list=[1],
state=states.BLOCKED,
from_state=states.RUNNING
)
Status.objects.batch_transit.reset_mock()
def test_wake_up(self):
process = PipelineProcess.objects.create()
process.is_sleep = True
process.save()
self.assertTrue(process.is_sleep)
process.wake_up()
self.assertFalse(process.is_sleep)
@patch(PIPELINE_CELERYTASK_DESTROY, MagicMock())
def test_destroy(self):
from pipeline.engine.models import ProcessCeleryTask
process = PipelineProcess.objects.create()
process.id = uniqid()
process.current_node_id = 'current_node_id'
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
mock_snapshot.delete = MagicMock()
process.snapshot = mock_snapshot
process.destroy()
self.assertFalse(process.is_alive)
self.assertEqual(process.current_node_id, '')
self.assertIsNone(process.snapshot)
mock_snapshot.delete.assert_called()
ProcessCeleryTask.objects.destroy.assert_called_with(process.id)
def test_save(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3, 4],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
mock_snapshot.save = MagicMock()
process.snapshot = mock_snapshot
process.save(save_snapshot=False)
mock_snapshot.save.assert_not_called()
process.save(save_snapshot=True)
mock_snapshot.save.assert_called()
mock_snapshot.save.reset_mock()
process.save()
mock_snapshot.save.assert_called()
def test_blocked_by_failure_or_suspended(self):
process = PipelineProcess.objects.create()
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
def return_suspended(*args, **kwargs):
return states.SUSPENDED
def return_failed(*args, **kwargs):
return states.FAILED
def return_none(*args, **kwargs):
return None
class MockChild(object):
def __init__(self, failed=False, suspended=False):
self.failed = failed
self.suspended = suspended
def blocked_by_failure_or_suspended(self):
return self.failed or self.suspended
def return_child_no_anomaly(*args, **kwargs):
return [MockChild(), MockChild(), MockChild()]
def return_child_has_failed(*args, **kwargs):
return [MockChild(), MockChild(), MockChild(failed=True)]
def return_child_has_suspended(*args, **kwargs):
return [MockChild(), MockChild(), MockChild(suspended=True)]
process.is_sleep = False
self.assertFalse(process.blocked_by_failure_or_suspended())
        # the current node has already failed
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_failed):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # the current node has been suspended
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_suspended):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # the whole pipeline has entered the SUSPENDED state; execution of the next node has not started
with mock.patch(PIPELINE_STATUS_STATE_FOR, return_none):
process.is_sleep = True
self.assertFalse(process.blocked_by_failure_or_suspended())
mock_snapshot = ProcessSnapshot.objects.create_snapshot(
pipeline_stack=Stack(),
children=[1, 2, 3],
root_pipeline=IdentifyObject(),
subprocess_stack=Stack([])
)
process.snapshot = mock_snapshot
        # none of the child processes has any anomaly
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_no_anomaly):
process.is_sleep = True
self.assertFalse(process.blocked_by_failure_or_suspended())
        # one of the child processes has failed
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_has_failed):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
        # one of the child processes has been suspended
with mock.patch(PIPELINE_PROCESS_FILTER, return_child_has_suspended):
process.is_sleep = True
self.assertTrue(process.blocked_by_failure_or_suspended())
def test_sync_with_children(self):
outputs = {'output_key': 'output_value'}
        variables = {'variable_key': 'variable_value'}
process = PipelineProcess.objects.create()
context = Object()
context.update_global_var = MagicMock()
context.sync_change = MagicMock()
data = Object()
data.update_outputs = MagicMock()
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([PipelineObject(context=context, data=data)]),
'_children': [1, 2, 3, 4],
'_root_pipeline': IdentifyObject(),
'_subprocess_stack': Stack([])
}
)
process.snapshot = mock_snapshot
process.clean_children = MagicMock()
def return_none(*args, **kwargs):
return None
def return_mock(id):
if id.endswith('data'):
return DataObject(outputs=outputs)
if id.endswith('context'):
return ContextObject(variables=variables)
with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_none):
self.assertRaises(exceptions.ChildDataSyncError, process.sync_with_children)
with mock.patch(PIPELINE_ENGINE_CORE_DATA_GET_OBJECT, return_mock):
process.sync_with_children()
context.sync_change.assert_called()
data.update_outputs.assert_called_with(outputs)
process.clean_children.assert_called()
@patch(PIPELINE_ENGINE_CORE_DATA_SET_OBJECT, MagicMock())
@patch(PIPELINE_PROCESS_BLOCKED_BY_FAILURE, MagicMock())
@patch(PIPELINE_PROCESS_DESTROY, MagicMock())
@patch(PIPELINE_PROCESS_PROCESS_READY, MagicMock())
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
@patch(PIPELINE_STATUS_TRANSIT, MagicMock())
def test_destroy_and_wake_up_parent(self):
context = MockContext()
context.clear_change_keys = MagicMock()
pipeline = PipelineObject(context=context)
process = PipelineProcess.objects.prepare_for_pipeline(pipeline)
children = []
for i in range(3):
children.append(process.__class__.objects.fork_child(process, 'current_node_id', 'destination_id'))
process.join(children)
# def worker(child):
# child.destroy_and_wake_up_parent(child.destination_id)
for child in children:
child.destroy_and_wake_up_parent(child.destination_id)
# sys_processes.append(Process(target=worker, args=(child,)))
# for p in sys_processes:
# p.start()
#
# for p in sys_processes:
# p.join()
process.refresh_from_db()
self.assertEqual(process.need_ack, -1)
self.assertEqual(process.ack_num, 0)
self.assertEqual(PipelineProcess.blocked_by_failure_or_suspended.call_count, 2)
PipelineProcess.objects.process_ready.assert_called_once()
self.assertEqual(PipelineProcess.destroy.call_count, 3)
def test__context_key(self):
process = PipelineProcess.objects.create()
process.id = uniqid()
self.assertEqual(process._context_key(), '{}_context'.format(process.id))
self.assertEqual(process._context_key(process_id='another_id'), '{}_context'.format('another_id'))
def test__data_key(self):
process = PipelineProcess.objects.create()
process.id = uniqid()
self.assertEqual(process._data_key(), '{}_data'.format(process.id))
self.assertEqual(process._data_key(process_id='another_id'), '{}_data'.format('another_id'))
def test_can_be_waked(self):
process = PipelineProcess.objects.create()
process.is_sleep = False
process.is_alive = False
self.assertFalse(process.can_be_waked())
process.is_sleep = True
process.is_alive = False
self.assertFalse(process.can_be_waked())
process.is_sleep = False
process.is_alive = True
self.assertFalse(process.can_be_waked())
process.is_sleep = True
process.is_alive = True
process.need_ack = 3
process.ack_num = 2
self.assertFalse(process.can_be_waked())
process.need_ack = 3
process.ack_num = 3
self.assertTrue(process.can_be_waked())
process.need_ack = -1
self.assertTrue(process.can_be_waked())
@patch(PIPELINE_ENGINE_CORE_DATA_DEL_OBJECT, MagicMock())
def test_clean_children(self):
from pipeline.engine.core.data import del_object
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': ['1', '2', '3'],
'_root_pipeline': IdentifyObject(),
'_subprocess_stack': Stack([])
}
)
mock_snapshot.clean_children = MagicMock()
mock_snapshot.save = MagicMock()
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.clean_children()
del_object.assert_has_calls([
mock.call(process._context_key('1')),
mock.call(process._data_key('1')),
mock.call(process._context_key('2')),
mock.call(process._data_key('2')),
mock.call(process._context_key('3')),
mock.call(process._data_key('3')),
])
mock_snapshot.clean_children.assert_called()
mock_snapshot.save.assert_called()
@patch(PIPELINE_STATUS_FAIL, MagicMock())
@patch(PIPELINE_STATUS_RAW_FAIL, MagicMock())
def test_exit_gracefully(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': ['1', '2', '3'],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([])
}
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.sleep = MagicMock()
e = Exception('test')
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
Status.objects.raw_fail.assert_not_called()
process.sleep.assert_called_with(adjust_status=True)
Status.objects.fail.reset_mock()
process.sleep.reset_mock()
# when stack is not empty
mock_snapshot.data['_pipeline_stack'] = Stack([PipelineObject()])
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
Status.objects.raw_fail.assert_not_called()
process.sleep.assert_called_with(adjust_status=True)
Status.objects.fail.reset_mock()
process.sleep.reset_mock()
# when current_node is none
top_pipeline = PipelineObject()
top_pipeline.node = MagicMock(return_value=None)
mock_snapshot.data['_pipeline_stack'] = Stack([top_pipeline])
process.current_node_id = uniqid()
process.exit_gracefully(e)
Status.objects.fail.assert_not_called()
Status.objects.raw_fail.assert_called_with(process.current_node_id, ex_data=traceback.format_exc(e))
process.sleep.assert_called_with(adjust_status=True)
def test_refresh_current_node(self):
node_id = uniqid()
process = PipelineProcess.objects.create()
process.refresh_current_node(node_id)
process.refresh_from_db()
self.assertEqual(process.current_node_id, node_id)
@patch(PIPELINE_STATUS_BATCH_TRANSIT, MagicMock())
def test_revoke_subprocess(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': [],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([1, 2, 3, 4])
}
)
process = PipelineProcess.objects.create(id=uniqid())
process.snapshot = mock_snapshot
process.sleep = MagicMock()
process.revoke_subprocess()
Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)
child_1 = Object()
child_2 = Object()
child_3 = Object()
child_1.revoke_subprocess = MagicMock()
child_2.revoke_subprocess = MagicMock()
child_3.revoke_subprocess = MagicMock()
def get_child(id):
return {
1: child_1,
2: child_2,
3: child_3
}[id]
mock_snapshot.data['_children'] = [1, 2, 3]
with mock.patch(PIPELINE_PROCESS_GET, get_child):
process.revoke_subprocess()
Status.objects.batch_transit.assert_called_with(id_list=[1, 2, 3, 4], state=states.REVOKED)
child_1.revoke_subprocess.assert_called()
child_2.revoke_subprocess.assert_called()
child_3.revoke_subprocess.assert_called()
# test when subprocess_stack and children return None
process = PipelineProcess.objects.create(id=uniqid())
self.assertIsNone(process.subprocess_stack)
self.assertIsNone(process.children)
process.revoke_subprocess()
@patch(PIPELINE_PROCESS_DESTROY, MagicMock())
def test_destroy_all(self):
mock_snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack(),
'_children': [],
'_root_pipeline': PipelineObject(),
'_subprocess_stack': Stack([])
}
)
process = PipelineProcess.objects.create()
process.snapshot = mock_snapshot
process.is_alive = False
process.destroy_all()
process.destroy.assert_not_called()
process.is_alive = True
process.destroy_all()
process.destroy.assert_called()
process.destroy.reset_mock()
mock_snapshot.data['_children'] = [1, 2, 3]
child_1 = Object()
child_1.children = []
child_1.destroy = MagicMock()
child_1.is_alive = True
child_2 = Object()
child_2.children = []
child_2.destroy = MagicMock()
child_2.is_alive = False
child_3 = Object()
child_3.children = [1]
child_3.destroy = MagicMock()
child_3.is_alive = True
def get_child(id):
return {
1: child_1,
2: child_2,
3: child_3
}[id]
with mock.patch(PIPELINE_PROCESS_GET, get_child):
process.destroy_all()
child_1.destroy.assert_called()
child_2.destroy.assert_not_called()
child_3.destroy.assert_called()
self.assertEqual(child_1.destroy.call_count, 2)
def test_in_subprocess__true(self):
snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([1, 2]),
}
)
process = PipelineProcess()
process.snapshot = snapshot
self.assertTrue(process.in_subprocess)
def test_in_subprocess__false(self):
snapshot = ProcessSnapshot(
data={
'_pipeline_stack': Stack([1]),
}
)
process = PipelineProcess()
process.snapshot = snapshot
self.assertFalse(process.in_subprocess)
|
mttcp_serv.py
|
import socket
import threading
from time import strftime
class TcpTimeServer:
    def __init__(self, host='', port=12345):
        self.addr = (host, port)
        self.serv = socket.socket()  # AF_INET / SOCK_STREAM by default
        # Allow quick restarts without waiting for TIME_WAIT sockets to expire
        self.serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.serv.bind(self.addr)
        self.serv.listen(1)
    def chat(self, c_sock):
        while True:
            data = c_sock.recv(1024)
            # recv() returns b'' once the client closes its end, so stop on
            # an empty read as well as on an explicit 'quit'
            if not data or data.strip() == b'quit':
                break
            data = '[%s] %s' % (strftime('%H:%M:%S'), data.decode('utf8'))
            c_sock.send(data.encode('utf8'))
        c_sock.close()
    def mainloop(self):
        try:
            while True:
                # Serve each client in its own thread so a slow client does not block accept()
                cli_sock, cli_addr = self.serv.accept()
                t = threading.Thread(target=self.chat, args=(cli_sock,))
                t.start()
        except KeyboardInterrupt:
            pass
        self.serv.close()
if __name__ == '__main__':
s = TcpTimeServer()
s.mainloop()
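# --- Hedged usage sketch (not part of the original file) ---
# A minimal client for the TcpTimeServer above, assuming it is running on
# localhost:12345. The port and the b'quit' convention come from the server
# code; the function name and the test message are illustrative only.
#
# def time_client(host='localhost', port=12345):
#     import socket
#     sock = socket.create_connection((host, port))
#     try:
#         sock.sendall(b'hello')
#         print(sock.recv(1024).decode('utf8'))  # e.g. '[12:00:00] hello'
#         sock.sendall(b'quit')                  # ask the server to end this chat
#     finally:
#         sock.close()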
|